alloc_fail.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda
// RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 \
// RUN: | %fcheck-nvptx64-nvidia-cuda
// CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{.*}} (8 bytes), but device allocation maps to host at 0x{{.*}} (8 bytes)
// CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer (device failure or illegal mapping).
// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
// UNSUPPORTED: clang-11
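// What this test exercises: the first 'target data' region maps arr[0:2]
// (elements 0 and 1). The nested region then requests arr[1:2] (elements 1
// and 2): element 1 is already mapped, but element 2 is not, so the request
// would extend the existing device allocation. libomptarget rejects such
// extensions, producing the "explicit extension not allowed" message and,
// since offloading is mandatory here, the fatal error checked above.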
int main() {
int arr[4] = {0, 1, 2, 3};
#pragma omp target data map(alloc: arr[0:2])
#pragma omp target data map(alloc: arr[1:2])
;
return 0;
}
|
PeptideIndexing.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2020.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h>
#include <OpenMS/DATASTRUCTURES/FASTAContainer.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/DATASTRUCTURES/SeqanIncludeWrapper.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/METADATA/PeptideEvidence.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <atomic>
#include <algorithm>
#include <fstream>
namespace OpenMS
{
/**
@brief Refreshes the protein references for all peptide hits in a vector of PeptideIdentifications and adds target/decoy information.
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy". For proteins the possible values are "target" and "decoy",
depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string) as a prefix or suffix (controlled by parameter @p prefix).
For peptides, the possible values are "target", "decoy" and "target+decoy", depending on whether the peptide sequence is found only in target proteins,
only in decoy proteins, or in both. The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool.
(For FDR calculations, "target+decoy" peptide hits count as target hits.)
@note Make sure that your protein names in the database contain a correctly formatted decoy string. This can be ensured by using @ref UTILS_DecoyDatabase.
If the decoy identifier is not recognized successfully, all proteins will be assumed to stem from the target part of the query.<br>
E.g., "sw|P33354_DECOY|YEHR_ECOLI Uncharacterized lipop..." is <b>invalid</b>, since the tool has no knowledge of how SwissProt entries are built up.
A correct identifier could be "DECOY_sw|P33354|YEHR_ECOLI Uncharacterized li ..." or "sw|P33354|YEHR_ECOLI_DECOY Uncharacterized li", depending on whether you are
using prefix or suffix annotation.<br>
Some helpful target/decoy statistics will be reported when done.
By default this tool will fail if an unmatched peptide occurs, i.e. if the database does not contain the corresponding protein.
You can force it to return successfully in this case by setting @p '-unmatched_action' to accept or even remove those hits.
Search engines (such as Mascot) will replace ambiguous amino acids ('B', 'J', 'Z' and 'X') in the protein database with unambiguous amino acids in the reported peptides, e.g. exchange 'X' with 'H'.
This will cause such peptides to not be found by exactly matching their sequences to the protein database.
However, we can recover these cases by using tolerant search for ambiguous amino acids in the protein sequence. This is done by default with up to four amino acids
per peptide hit. If you only want exact matches, set @p aaa_max to zero (but expect that unmatched peptides might occur)!
Leucine/Isoleucine:
Further complications can arise due to the presence of the isobaric amino acids isoleucine ('I') and leucine ('L') in protein sequences.
Since the two have the exact same chemical composition and mass, they generally cannot be distinguished by mass spectrometry.
If a peptide containing 'I' was reported as a match for a spectrum, a peptide containing 'L' instead would be an equally good match (and vice versa).
To account for this inherent ambiguity, setting the flag @p IL_equivalent causes 'I' and 'L' to be considered as indistinguishable.@n
For example, if the sequence "PEPTIDE" (matching "Protein1") was identified as a search hit,
but the database additionally contained "PEPTLDE" (matching "Protein2"), running PeptideIndexer with the @p IL_equivalent option would
report both "Protein1" and "Protein2" as accessions for "PEPTIDE".
(This is independent of ambiguous matching via @p aaa_max.)
Additionally, setting this flag will convert all 'J's in any protein sequence to 'I'. This way, no tolerant search is required for 'J' (but is still possible for all
the other ambiguous amino acids).
If @p write_protein_sequences is requested and @p IL_equivalent is set as well, both the I/L-version and unmodified protein sequences need to be stored internally.
This requires some extra memory, roughly equivalent to the size of the FASTA database file itself.
Enzyme specificity:
Once a peptide sequence is found in a protein sequence, this does <b>not</b> imply that the hit is valid! This is where enzyme specificity comes into play.
By default, the enzyme and the specificity used during search are derived from metadata in the idXML files ('auto' setting).
We make two exceptions to any specificity constraints:
1) peptides starting at the second or third position of a protein are still considered N-terminally specific,
since the leading residues can be cleaved off in vivo; X!Tandem reports these peptides. For example, the two peptides ABAR and LABAR would both match a protein starting with MLABAR.
2) adventitious cleavage at Asp|Pro (Aspartate/D | Proline/P) is allowed for all enzymes (as supported by X!Tandem), i.e. counts as a proper cleavage site (see http://www.thegpm.org/tandem/release.html).
You can relax the requirements further by choosing <tt>semi-tryptic</tt> (only one of two "internal" termini must match requirements)
or <tt>none</tt> (essentially allowing all hits, no matter their context). These settings should not be used (due to high risk of reporting false positives),
unless the search engine was instructed to search peptides in the same way (but then the default 'auto' setting will do the correct thing).
X!Tandem treats any occurrence of 'X' as a stop codon (and thus as a cleavage site). The resulting peptide will be non- or semi-tryptic.
Those hits will not be matched and need to be removed using @p '-unmatched_action' (do not use termini specificity to cheat around it! It adds more false hits!).
The FASTA file should not contain duplicate protein accessions (since accessions are not validated) if a correct unique-matching annotation is important (target/decoy annotation is still correct).
Threading:
This tool supports multiple threads (@p threads option) to speed up computation, at the cost of a little extra memory.
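A minimal usage sketch (variable names and the FASTA filename are illustrative, not part of the API; error handling is omitted):
@code
std::vector<FASTAFile::FASTAEntry> proteins;
FASTAFile().load("db_with_decoys.fasta", proteins); // concatenated target+decoy database
std::vector<ProteinIdentification> prot_ids; // read from an idXML file
std::vector<PeptideIdentification> pep_ids;  // read from the same idXML file
PeptideIndexing indexer;
PeptideIndexing::ExitCodes r = indexer.run(proteins, prot_ids, pep_ids);
if (r != PeptideIndexing::EXECUTION_OK) { /* react to the error code */ }
@endcode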
*/
class OPENMS_DLLAPI PeptideIndexing :
public DefaultParamHandler, public ProgressLogger
{
public:
/// name of enzyme/specificity which signals that the enzyme/specificity should be taken from meta information
static char const* const AUTO_MODE; /* = 'auto' */
/// Exit codes
enum ExitCodes
{
EXECUTION_OK,
DATABASE_EMPTY,
PEPTIDE_IDS_EMPTY,
ILLEGAL_PARAMETERS,
UNEXPECTED_RESULT
};
/// Action to take when peptide hits could not be matched
enum class Unmatched
{
IS_ERROR, ///< throws an error (and returns no results)
WARN, ///< skips annotation with target/decoy but returns with 'success'
REMOVE, ///< removes unmatched hits entirely and returns with 'success'
SIZE_OF_UNMATCHED
};
static const std::array<std::string, (Size)Unmatched::SIZE_OF_UNMATCHED> names_of_unmatched;
enum class MissingDecoy
{
IS_ERROR,
WARN,
SILENT,
SIZE_OF_MISSING_DECOY
};
static const std::array<std::string, (Size)MissingDecoy::SIZE_OF_MISSING_DECOY> names_of_missing_decoy;
/// Default constructor
PeptideIndexing();
/// Default destructor
~PeptideIndexing() override;
/// forward for old interface and pyOpenMS; use run<T>() for more control
inline ExitCodes run(std::vector<FASTAFile::FASTAEntry>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
{
FASTAContainer<TFI_Vector> protein_container(proteins);
return run<TFI_Vector>(protein_container, prot_ids, pep_ids);
}
/**
@brief Re-index peptide identifications honoring enzyme cutting rules, ambiguous amino acids and target/decoy hits.
Template parameter 'T' can be either TFI_File or TFI_Vector. If the data is already available, use TFI_Vector and pass the vector.
If the data is still in a FASTA file and it's not needed afterwards for additional processing, use TFI_File and pass the filename.
PeptideIndexer refreshes target/decoy information and mapping of peptides to proteins.
The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool. (For FDR calculations, "target+decoy" peptide hits count as target hits.)
PeptideIndexer allows for ambiguous amino acids (B|J|Z|X) in the protein database, but not in the peptide sequences.
For the latter only I/L can be treated as equivalent (see 'IL_equivalent' flag), but 'J' is not allowed.
Enzyme cutting rules and partial specificity can be specified.
Resulting protein hits appear in the order of the FASTA file, except for orphaned proteins, which will appear first with an empty target_decoy metavalue.
Duplicate protein accessions & sequences will not raise a warning, but create multiple hits (PeptideIndexer scans over the FASTA file once for efficiency
reasons, and thus might not see all accessions & sequences at once).
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy".
For proteins the possible values are "target" and "decoy", depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string)
as a prefix or suffix (controlled by parameter @p prefix).
Peptide hits are annotated with metavalue 'protein_references', and if matched to at least one protein also with metavalue 'target_decoy'.
The possible values for 'target_decoy' are "target", "decoy" and "target+decoy",
depending on whether the peptide sequence is found only in target proteins, only in decoy proteins, or in both. The metavalue is not present if the peptide is unmatched.
Runtime: PeptideIndexer is usually very fast (loading and storing the data takes the most time) and search speed can be further improved (linearly) by using more threads.
Avoid allowing too many (>=4) ambiguous amino acids if your database contains long stretches of 'X' (exponential search space).
@param proteins A list of proteins -- either read piecewise from a FASTA file or as existing vector of FASTAEntries.
@param prot_ids Resulting protein identifications associated to pep_ids (will be re-written completely)
@param pep_ids Peptide identifications which should be searched within @p proteins and then linked to @p prot_ids
@return Exit status codes.
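A sketch of the file-streaming variant (assuming a FASTAContainer<TFI_File> can be constructed directly from the FASTA filename; prot_ids/pep_ids as read from idXML):
@code
FASTAContainer<TFI_File> proteins("db_with_decoys.fasta");
PeptideIndexing indexer;
PeptideIndexing::ExitCodes r = indexer.run<TFI_File>(proteins, prot_ids, pep_ids);
@endcode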
*/
template<typename T>
ExitCodes run(FASTAContainer<T>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
{
// no decoy string provided? try to deduce from data
if (decoy_string_.empty())
{
auto r = DecoyHelper::findDecoyString(proteins);
proteins.reset();
if (!r.success)
{
r.is_prefix = true;
r.name = "DECOY_";
//OPENMS_LOG_WARN << "Unable to determine decoy string automatically (not enough decoys were detected)! Using default " << (r.is_prefix ? "prefix" : "suffix") << " decoy string '" << r.name << "'\n"
// << "If you think that this is incorrect, please provide a decoy_string and its position manually!" << std::endl;
}
prefix_ = r.is_prefix;
decoy_string_ = r.name;
// decoy string and position was extracted successfully
////OPENMS_LOG_INFO << "Using " << (prefix_ ? "prefix" : "suffix") << " decoy string '" << decoy_string_ << "'" << std::endl;
}
//---------------------------------------------------------------
// parsing parameters, correcting xtandem and MSGFPlus parameters
//---------------------------------------------------------------
ProteaseDigestion enzyme;
if (!enzyme_name_.empty() && (enzyme_name_.compare(AUTO_MODE) != 0))
{ // use param (not empty, not 'auto')
enzyme.setEnzyme(enzyme_name_);
}
else if (!prot_ids.empty() && prot_ids[0].getSearchParameters().digestion_enzyme.getName() != "unknown_enzyme")
{ // take from meta (this assumes all runs used the same enzyme)
//OPENMS_LOG_INFO << "Info: using '" << prot_ids[0].getSearchParameters().digestion_enzyme.getName() << "' as enzyme (obtained from idXML) for digestion." << std::endl;
enzyme.setEnzyme(&prot_ids[0].getSearchParameters().digestion_enzyme);
}
else
{ // fallback
//OPENMS_LOG_WARN << "Warning: Enzyme name neither given nor deduceable from input. Defaulting to Trypsin!" << std::endl;
enzyme.setEnzyme("Trypsin");
}
bool xtandem_fix_parameters = false;
bool msgfplus_fix_parameters = false;
// determine if at least one search engine was xtandem or MSGFPlus to enable special rules
for (const auto& prot_id : prot_ids)
{
String search_engine = prot_id.getOriginalSearchEngineName();
StringUtils::toUpper(search_engine);
//OPENMS_LOG_INFO << "Peptide identification engine: " << search_engine << std::endl;
if (search_engine == "XTANDEM" || prot_id.getSearchParameters().metaValueExists("SE:XTandem")) { xtandem_fix_parameters = true; }
if (search_engine == "MS-GF+" || search_engine == "MSGFPLUS" || prot_id.getSearchParameters().metaValueExists("SE:MS-GF+")) { msgfplus_fix_parameters = true; }
}
// including MSGFPlus -> Trypsin/P as enzyme
if (msgfplus_fix_parameters && enzyme.getEnzymeName() == "Trypsin")
{
//OPENMS_LOG_WARN << "MSGFPlus detected but enzyme cutting rules were set to Trypsin. Correcting to Trypsin/P to cope with special cutting rule in MSGFPlus." << std::endl;
enzyme.setEnzyme("Trypsin/P");
}
//OPENMS_LOG_INFO << "Enzyme: " << enzyme.getEnzymeName() << std::endl;
if (!enzyme_specificity_.empty() && (enzyme_specificity_.compare(AUTO_MODE) != 0))
{ // use param (not empty and not 'auto')
enzyme.setSpecificity(ProteaseDigestion::getSpecificityByName(enzyme_specificity_));
}
else if (!prot_ids.empty() && prot_ids[0].getSearchParameters().enzyme_term_specificity != ProteaseDigestion::SPEC_UNKNOWN)
{ // deduce from data ('auto')
enzyme.setSpecificity(prot_ids[0].getSearchParameters().enzyme_term_specificity);
//OPENMS_LOG_INFO << "Info: using '" << EnzymaticDigestion::NamesOfSpecificity[prot_ids[0].getSearchParameters().enzyme_term_specificity] << "' as enzyme specificity (obtained from idXML) for digestion." << std::endl;
}
else
{ // fallback
//OPENMS_LOG_WARN << "Warning: Enzyme specificity neither given nor present in the input file. Defaulting to 'full'!" << std::endl;
enzyme.setSpecificity(ProteaseDigestion::SPEC_FULL);
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
// cache the first proteins
const size_t PROTEIN_CACHE_SIZE = 4e5; // 400k should be enough for most DB's and is not too hard on memory either (~200 MB FASTA)
this->startProgress(0, 1, "Load first DB chunk");
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
this->endProgress();
if (proteins.empty()) // we do not allow an empty database
{
//OPENMS_LOG_ERROR << "Error: An empty database was provided. Mapping makes no sense. Aborting..." << std::endl;
return DATABASE_EMPTY;
}
if (pep_ids.empty()) // Aho-Corasick requires non-empty input; but we allow this case, since the TOPP tool should not crash when encountering a bad raw file (with no PSMs)
{
//OPENMS_LOG_WARN << "Warning: An empty set of peptide identifications was provided. Output will be empty as well." << std::endl;
if (!keep_unreferenced_proteins_)
{
// delete only protein hits, not whole ID runs incl. meta data:
for (std::vector<ProteinIdentification>::iterator it = prot_ids.begin();
it != prot_ids.end(); ++it)
{
it->getHits().clear();
}
}
return PEPTIDE_IDS_EMPTY;
}
FoundProteinFunctor func(enzyme, xtandem_fix_parameters); // store the matches
Map<String, Size> acc_to_prot; // map: accessions --> FASTA protein index
std::vector<bool> protein_is_decoy; // protein index -> is decoy?
std::vector<std::string> protein_accessions; // protein index -> accession
bool invalid_protein_sequence = false; // check for proteins with modifications, i.e. '[' or '(', and throw an exception
{ // new scope - forget data after search
/*
BUILD Peptide DB
*/
bool has_illegal_AAs(false);
AhoCorasickAmbiguous::PeptideDB pep_DB;
for (std::vector<PeptideIdentification>::const_iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
{
//String run_id = it1->getIdentifier();
const std::vector<PeptideHit>& hits = it1->getHits();
for (std::vector<PeptideHit>::const_iterator it2 = hits.begin(); it2 != hits.end(); ++it2)
{
//
// Warning:
// do not skip over peptides here, since the results are iterated in the same way
//
String seq = it2->getSequence().toUnmodifiedString().remove('*'); // make a copy, i.e. do NOT change the peptide sequence!
if (seqan::isAmbiguous(seqan::AAString(seq.c_str())))
{ // do not quit here, to show the user all sequences .. only quit after loop
//OPENMS_LOG_ERROR << "Peptide sequence '" << it2->getSequence() << "' contains one or more ambiguous amino acids (B|J|Z|X).\n";
has_illegal_AAs = true;
}
if (IL_equivalent_) // convert L to I;
{
seq.substitute('L', 'I');
}
appendValue(pep_DB, seq.c_str());
}
}
if (has_illegal_AAs)
{
//OPENMS_LOG_ERROR << "One or more peptides contained illegal amino acids. This is not allowed!"
// << "\nPlease either remove the peptide or replace it with one of the unambiguous ones (while allowing for ambiguous AA's to match the protein)." << std::endl;;
}
//OPENMS_LOG_INFO << "Mapping " << length(pep_DB) << " peptides to " << (proteins.size() == PROTEIN_CACHE_SIZE ? "? (unknown number of)" : String(proteins.size())) << " proteins." << std::endl;
if (length(pep_DB) == 0)
{ // Aho-Corasick will crash if given empty needles as input
//OPENMS_LOG_WARN << "Warning: Peptide identifications have no hits inside! Output will be empty as well." << std::endl;
return PEPTIDE_IDS_EMPTY;
}
/*
Aho Corasick (fast)
*/
//OPENMS_LOG_INFO << "Searching with up to " << aaa_max_ << " ambiguous amino acid(s) and " << mm_max_ << " mismatch(es)!" << std::endl;
SysInfo::MemUsage mu;
//OPENMS_LOG_INFO << "Building trie ...";
StopWatch s;
s.start();
AhoCorasickAmbiguous::FuzzyACPattern pattern;
AhoCorasickAmbiguous::initPattern(pep_DB, aaa_max_, mm_max_, pattern);
s.stop();
//OPENMS_LOG_INFO << " done (" << int(s.getClockTime()) << "s)" << std::endl;
s.reset();
uint16_t count_j_proteins(0);
bool has_active_data = true; // becomes false if end of FASTA file is reached
const std::string jumpX(aaa_max_ + mm_max_ + 1, 'X'); // jump over stretches of 'X' which cost a lot of time; +1 because AXXA is a valid hit for aaa_max == 2 (cannot split it)
// use very large target value for progress if DB size is unknown (did not fit into first chunk)
this->startProgress(0, proteins.size() == PROTEIN_CACHE_SIZE ? std::numeric_limits<SignedSize>::max() : proteins.size(), "Aho-Corasick");
std::atomic<int> progress_prots(0);
#ifdef _OPENMP
#pragma omp parallel
#endif
{
FoundProteinFunctor func_threads(enzyme, xtandem_fix_parameters);
Map<String, Size> acc_to_prot_thread; // map: accessions --> FASTA protein index
AhoCorasickAmbiguous fuzzyAC;
String prot;
while (true)
{
#pragma omp barrier // all threads need to be here, since we are about to swap protein data
#pragma omp single
{
DEBUG_ONLY std::cerr << " activating cache ...\n";
has_active_data = proteins.activateCache(); // swap in last cache
protein_accessions.resize(proteins.getChunkOffset() + proteins.chunkSize());
} // implicit barrier here
if (!has_active_data) break; // leave while-loop
SignedSize prot_count = (SignedSize)proteins.chunkSize();
#pragma omp master
{
DEBUG_ONLY std::cerr << "Filling Protein Cache ...";
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
protein_is_decoy.resize(proteins.getChunkOffset() + prot_count);
for (SignedSize i = 0; i < prot_count; ++i)
{ // do this in master only, to avoid false sharing
const String& seq = proteins.chunkAt(i).identifier;
protein_is_decoy[i + proteins.getChunkOffset()] = (prefix_ ? seq.hasPrefix(decoy_string_) : seq.hasSuffix(decoy_string_));
}
DEBUG_ONLY std::cerr << " done" << std::endl;
}
DEBUG_ONLY std::cerr << " starting for loop \n";
// search all peptides in each protein
#pragma omp for schedule(dynamic, 100) nowait
for (SignedSize i = 0; i < prot_count; ++i)
{
++progress_prots; // atomic
if (omp_get_thread_num() == 0)
{
this->setProgress(progress_prots);
}
prot = proteins.chunkAt(i).sequence;
prot.remove('*');
// check for invalid sequences with modifications
if (prot.has('[') || prot.has('('))
{
invalid_protein_sequence = true; // not omp-critical because it's write-only
// we cannot throw an exception here, since we'd need to catch it within the parallel region
}
// convert L/J to I; also replace 'J' in proteins
if (IL_equivalent_)
{
prot.substitute('L', 'I');
prot.substitute('J', 'I');
}
else
{ // warn if 'J' is found (it eats into aaa_max)
if (prot.has('J'))
{
#pragma omp atomic
++count_j_proteins;
}
}
Size prot_idx = i + proteins.getChunkOffset();
// test if protein was a hit
Size hits_total = func_threads.filter_passed + func_threads.filter_rejected;
// check if there are stretches of 'X'
if (prot.has('X'))
{
// create chunks of the protein (splitting it at stretches of 'X..X') and feed them to AC one by one
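// note: offset is initialized to (size_t)-1 on purpose, so that the
// first find() call below starts searching at offset + 1 == 0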
size_t offset = -1, start = 0;
while ((offset = prot.find(jumpX, offset + 1)) != std::string::npos)
{
//std::cout << "found X..X at " << offset << " in protein " << proteins[i].identifier << "\n";
addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start, offset + jumpX.size() - start), prot, prot_idx, (int)start, func_threads);
// skip ahead while we encounter more X...
while (offset + jumpX.size() < prot.size() && prot[offset + jumpX.size()] == 'X') ++offset;
start = offset;
//std::cout << " new start: " << start << "\n";
}
// last chunk
if (start < prot.size())
{
addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start), prot, prot_idx, (int)start, func_threads);
}
}
else
{
addHits_(fuzzyAC, pattern, pep_DB, prot, prot, prot_idx, 0, func_threads);
}
// was protein found?
if (hits_total < func_threads.filter_passed + func_threads.filter_rejected)
{
protein_accessions[prot_idx] = proteins.chunkAt(i).identifier;
acc_to_prot_thread[protein_accessions[prot_idx]] = prot_idx;
}
} // end parallel FOR
// join results again
DEBUG_ONLY std::cerr << " critical now \n";
#ifdef _OPENMP
#pragma omp critical(PeptideIndexer_joinAC)
#endif
{
s.start();
// hits
func.merge(func_threads);
// accession -> index
acc_to_prot.insert(acc_to_prot_thread.begin(), acc_to_prot_thread.end());
acc_to_prot_thread.clear();
s.stop();
} // OMP end critical
} // end readChunk
} // OMP end parallel
this->endProgress();
std::cout << "Merge took: " << s.toString() << "\n";
mu.after();
std::cout << mu.delta("Aho-Corasick") << "\n\n";
//OPENMS_LOG_INFO << "\nAho-Corasick done:\n found " << func.filter_passed << " hits for " << func.pep_to_prot.size() << " of " << length(pep_DB) << " peptides.\n";
// write some stats
//OPENMS_LOG_INFO << "Peptide hits passing enzyme filter: " << func.filter_passed << "\n"
// << " ... rejected by enzyme filter: " << func.filter_rejected << std::endl;
/*if (count_j_proteins)
{
//OPENMS_LOG_WARN << "PeptideIndexer found " << count_j_proteins << " protein sequences in your database containing the amino acid 'J'."
// << "To match 'J' in a protein, an ambiguous amino acid placeholder for I/L will be used.\n"
//<< "This costs runtime and eats into the 'aaa_max' limit, leaving less opportunity for B/Z/X matches.\n"
// << "If you want 'J' to be treated as unambiguous, enable '-IL_equivalent'!" << std::endl;
}*/
} // end local scope
//
// do mapping
//
// index existing proteins
Map<String, Size> runid_to_runidx; // identifier to index
for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
{
runid_to_runidx[prot_ids[run_idx].getIdentifier()] = run_idx;
}
// for peptides --> proteins
Size stats_matched_unique(0);
Size stats_matched_multi(0);
Size stats_unmatched(0); // no match to DB
Size stats_count_m_t(0); // match to Target DB
Size stats_count_m_d(0); // match to Decoy DB
Size stats_count_m_td(0); // match to T+D DB
Map<Size, std::set<Size> > runidx_to_protidx; // which protein indices appear in which ProteinIdentification run (according to mapped peptides)
Size pep_idx(0);
for (std::vector<PeptideIdentification>::iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
{
// which ProteinIdentification does the peptide belong to?
Size run_idx = runid_to_runidx[it1->getIdentifier()];
std::vector<PeptideHit>& hits = it1->getHits();
for (std::vector<PeptideHit>::iterator it_hit = hits.begin(); it_hit != hits.end(); /* no increase here! we might need to skip it; see below */)
{
// clear protein accessions
it_hit->setPeptideEvidences(std::vector<PeptideEvidence>());
//
// is this a decoy hit?
//
bool matches_target(false);
bool matches_decoy(false);
std::set<Size> prot_indices; /// protein hits of this peptide
// add new protein references
for (std::set<PeptideProteinMatchInformation>::const_iterator it_i = func.pep_to_prot[pep_idx].begin();
it_i != func.pep_to_prot[pep_idx].end(); ++it_i)
{
prot_indices.insert(it_i->protein_index);
const String& accession = protein_accessions[it_i->protein_index];
PeptideEvidence pe(accession, it_i->position, it_i->position + (int)it_hit->getSequence().size() - 1, it_i->AABefore, it_i->AAAfter);
it_hit->addPeptideEvidence(pe);
runidx_to_protidx[run_idx].insert(it_i->protein_index); // fill protein hits
if (protein_is_decoy[it_i->protein_index])
{
matches_decoy = true;
}
else
{
matches_target = true;
}
}
++pep_idx; // next hit
if (matches_decoy && matches_target)
{
it_hit->setMetaValue("target_decoy", "target+decoy");
++stats_count_m_td;
}
else if (matches_target)
{
it_hit->setMetaValue("target_decoy", "target");
++stats_count_m_t;
}
else if (matches_decoy)
{
it_hit->setMetaValue("target_decoy", "decoy");
++stats_count_m_d;
} // else: could match to no protein (i.e. both are false)
//else ... // not required (handled below; see stats_unmatched);
if (prot_indices.size() == 1)
{
it_hit->setMetaValue("protein_references", "unique");
++stats_matched_unique;
}
else if (prot_indices.size() > 1)
{
it_hit->setMetaValue("protein_references", "non-unique");
++stats_matched_multi;
}
else
{
++stats_unmatched;
//if (stats_unmatched < 15) OPENMS_LOG_INFO << "Unmatched peptide: " << it_hit->getSequence() << "\n";
//else if (stats_unmatched == 15) OPENMS_LOG_INFO << "Unmatched peptide: ...\n";
if (unmatched_action_ == Unmatched::REMOVE)
{
it_hit = hits.erase(it_hit);
continue; // already points to the next hit
}
else
{
it_hit->setMetaValue("protein_references", "unmatched");
}
}
++it_hit; // next hit
} // all hits
} // next PepID
//Size total_peptides = stats_count_m_t + stats_count_m_d + stats_count_m_td + stats_unmatched;
//OPENMS_LOG_INFO << "-----------------------------------\n";
//OPENMS_LOG_INFO << "Peptide statistics\n";
//OPENMS_LOG_INFO << "\n";
//OPENMS_LOG_INFO << " unmatched : " << stats_unmatched << " (" << stats_unmatched * 100 / total_peptides << " %)\n";
//OPENMS_LOG_INFO << " target/decoy:\n";
//OPENMS_LOG_INFO << " match to target DB only: " << stats_count_m_t << " (" << stats_count_m_t * 100 / total_peptides << " %)\n";
//OPENMS_LOG_INFO << " match to decoy DB only : " << stats_count_m_d << " (" << stats_count_m_d * 100 / total_peptides << " %)\n";
//OPENMS_LOG_INFO << " match to both : " << stats_count_m_td << " (" << stats_count_m_td * 100 / total_peptides << " %)\n";
//OPENMS_LOG_INFO << "\n";
//OPENMS_LOG_INFO << " mapping to proteins:\n";
//OPENMS_LOG_INFO << " no match (to 0 protein) : " << stats_unmatched << "\n";
//OPENMS_LOG_INFO << " unique match (to 1 protein) : " << stats_matched_unique << "\n";
//OPENMS_LOG_INFO << " non-unique match (to >1 protein): " << stats_matched_multi << std::endl;
/// for proteins --> peptides
Size stats_matched_proteins(0), stats_matched_new_proteins(0), stats_orphaned_proteins(0), stats_proteins_target(0), stats_proteins_decoy(0);
// all peptides contain the correct protein hit references, now update the protein hits
for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
{
std::set<Size> masterset = runidx_to_protidx[run_idx]; // all protein matches from above
std::vector<ProteinHit>& phits = prot_ids[run_idx].getHits();
{
// go through existing protein hits and count orphaned proteins (with no peptide hits)
std::vector<ProteinHit> orphaned_hits;
for (std::vector<ProteinHit>::iterator p_hit = phits.begin(); p_hit != phits.end(); ++p_hit)
{
const String& acc = p_hit->getAccession();
if (!acc_to_prot.has(acc)) // acc_to_prot only contains found proteins from current run
{ // old hit is orphaned
++stats_orphaned_proteins;
if (keep_unreferenced_proteins_)
{
p_hit->setMetaValue("target_decoy", "");
orphaned_hits.push_back(*p_hit);
}
}
}
// only keep orphaned hits (if any)
phits = orphaned_hits;
}
// add new protein hits
FASTAFile::FASTAEntry fe;
phits.reserve(phits.size() + masterset.size());
for (std::set<Size>::const_iterator it = masterset.begin(); it != masterset.end(); ++it)
{
ProteinHit hit;
hit.setAccession(protein_accessions[*it]);
if (write_protein_sequence_ || write_protein_description_)
{
proteins.readAt(fe, *it);
if (write_protein_sequence_)
{
hit.setSequence(fe.sequence);
} // no else, since sequence is empty by default
if (write_protein_description_)
{
hit.setDescription(fe.description);
} // no else, since description is empty by default
}
if (protein_is_decoy[*it])
{
hit.setMetaValue("target_decoy", "decoy");
++stats_proteins_decoy;
}
else
{
hit.setMetaValue("target_decoy", "target");
++stats_proteins_target;
}
phits.push_back(hit);
++stats_matched_new_proteins;
}
stats_matched_proteins += phits.size();
}
//OPENMS_LOG_INFO << "-----------------------------------\n";
//OPENMS_LOG_INFO << "Protein statistics\n";
//OPENMS_LOG_INFO << "\n";
//OPENMS_LOG_INFO << " total proteins searched: " << proteins.size() << "\n";
//OPENMS_LOG_INFO << " matched proteins : " << stats_matched_proteins << " (" << stats_matched_new_proteins << " new)\n";
if (stats_matched_proteins)
{ // prevent Division-by-0 Exception
//OPENMS_LOG_INFO << " matched target proteins: " << stats_proteins_target << " (" << stats_proteins_target * 100 / stats_matched_proteins << " %)\n";
//OPENMS_LOG_INFO << " matched decoy proteins : " << stats_proteins_decoy << " (" << stats_proteins_decoy * 100 / stats_matched_proteins << " %)\n";
}
//OPENMS_LOG_INFO << " orphaned proteins : " << stats_orphaned_proteins << (keep_unreferenced_proteins_ ? " (all kept)" : " (all removed)\n");
//OPENMS_LOG_INFO << "-----------------------------------" << std::endl;
/// exit if no peptides were matched to decoy
bool has_error = false;
if (invalid_protein_sequence)
{
//OPENMS_LOG_ERROR << "Error: One or more protein sequences contained the characters '[' or '(', which are illegal in protein sequences."
// << "\nPeptide hits might be masked by these characters (which usually indicate presence of modifications).\n";
has_error = true;
}
if ((stats_count_m_d + stats_count_m_td) == 0)
{
String msg("No peptides were matched to the decoy portion of the database! Did you provide the correct concatenated database? Are your 'decoy_string' (=" + decoy_string_ + ") and 'decoy_string_position' (=" + std::string(param_.getValue("decoy_string_position")) + ") settings correct?");
if (missing_decoy_action_ == MissingDecoy::IS_ERROR)
{
//OPENMS_LOG_ERROR << "Error: " << msg << "\nSet 'missing_decoy_action' to 'warn' if you are sure this is ok!\nAborting ..." << std::endl;
}
else if (missing_decoy_action_ == MissingDecoy::WARN)
{
//OPENMS_LOG_WARN << "Warn: " << msg << "\nSet 'missing_decoy_action' to 'error' if you want to elevate this to an error!" << std::endl;
}
else // silent
{
}
}
if (stats_unmatched > 0)
{
//OPENMS_LOG_ERROR << "PeptideIndexer found unmatched peptides, which could not be associated to a protein.\n";
if (unmatched_action_ == Unmatched::IS_ERROR)
{
//OPENMS_LOG_ERROR
//<< "Potential solutions:\n"
//<< " - check your FASTA database is identical to the search DB (or use 'auto')\n"
//<< " - set 'enzyme:specificity' and 'enzyme:name' to 'auto' to match the parameters of the search engine\n"
//<< " - increase 'aaa_max' to allow more ambiguous amino acids\n"
//<< " - as a last resort: use the 'unmatched_action' option to accept or even remove unmatched peptides\n"
//<< " (note that unmatched peptides cannot be used for FDR calculation or quantification)\n";
has_error = true;
}
else if (unmatched_action_ == Unmatched::WARN)
{
//OPENMS_LOG_ERROR << " Warning: " << stats_unmatched << " unmatched hits have been found, but were not removed!\n"
//<< "These are not annotated with target/decoy information and might lead to issues with downstream tools (such as FDR).\n"
//<< "Switch to '" << names_of_unmatched[(Size)Unmatched::REMOVE] << "' if you want to avoid these problems.\n";
}
else if (unmatched_action_ == Unmatched::REMOVE)
{
//OPENMS_LOG_ERROR << " Warning: " << stats_unmatched <<" unmatched hits have been removed!\n"
// << "Make sure that these hits are actually a violation of the cutting rules by inspecting the database!\n";
if (xtandem_fix_parameters) OPENMS_LOG_ERROR << "Since the results are from X!Tandem, this is probably ok (check anyways).\n";
}
else
{
throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
}
}
if (has_error)
{
//OPENMS_LOG_ERROR << "Result files will be written, but PeptideIndexer will exit with an error code." << std::endl;
return UNEXPECTED_RESULT;
}
return EXECUTION_OK;
}
const String& getDecoyString() const;
bool isPrefix() const;
protected:
struct PeptideProteinMatchInformation
{
OpenMS::Size protein_index; //< index of the protein the peptide is contained in
OpenMS::Int position; //< the position of the peptide in the protein
char AABefore; //< the amino acid before the peptide in the protein
char AAAfter; //< the amino acid after the peptide in the protein
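// expose the members as a tuple, so the comparison operators below can
// reuse std::tie's lexicographic ordering instead of hand-written chains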
const std::tuple<const Size&, const Int&, const char&, const char&> tie() const
{
return std::tie(protein_index, position, AABefore, AAAfter);
}
bool operator<(const PeptideProteinMatchInformation& other) const
{
return tie() < other.tie();
}
bool operator==(const PeptideProteinMatchInformation& other) const
{
return tie() == other.tie();
}
};
struct FoundProteinFunctor
{
public:
typedef std::map<OpenMS::Size, std::set<PeptideProteinMatchInformation> > MapType;
MapType pep_to_prot; //< peptide index --> protein indices
OpenMS::Size filter_passed; //< number of accepted hits (passing addHit() constraints)
OpenMS::Size filter_rejected; //< number of rejected hits (not passing addHit())
private:
ProteaseDigestion enzyme_;
bool xtandem_; //< are we checking xtandem cleavage rules?
public:
explicit FoundProteinFunctor(const ProteaseDigestion& enzyme, bool xtandem) :
pep_to_prot(), filter_passed(0), filter_rejected(0), enzyme_(enzyme), xtandem_(xtandem)
{
}
void merge(FoundProteinFunctor& other)
{
if (pep_to_prot.empty())
{ // first merge is easy
pep_to_prot.swap(other.pep_to_prot);
}
else
{
for (FoundProteinFunctor::MapType::const_iterator it = other.pep_to_prot.begin(); it != other.pep_to_prot.end(); ++it)
{ // augment set
this->pep_to_prot[it->first].insert(other.pep_to_prot[it->first].begin(), other.pep_to_prot[it->first].end());
}
other.pep_to_prot.clear();
}
// cheap members
this->filter_passed += other.filter_passed;
other.filter_passed = 0;
this->filter_rejected += other.filter_rejected;
other.filter_rejected = 0;
}
void addHit(const OpenMS::Size idx_pep,
const OpenMS::Size idx_prot,
const OpenMS::Size len_pep,
const OpenMS::String& seq_prot,
OpenMS::Int position)
{
//TODO we could read and double-check missed cleavages as well
if (enzyme_.isValidProduct(seq_prot, position, len_pep, true, true, xtandem_))
{
PeptideProteinMatchInformation match
{
idx_prot,
position,
(position == 0) ? PeptideEvidence::N_TERMINAL_AA : seq_prot[position - 1],
(position + len_pep >= seq_prot.size()) ?
PeptideEvidence::C_TERMINAL_AA :
seq_prot[position + len_pep]
};
pep_to_prot[idx_pep].insert(match);
++filter_passed;
}
else
{
//std::cerr << "REJECTED Peptide " << seq_pep << " with hit to protein "
// << seq_prot << " at position " << position << std::endl;
++filter_rejected;
}
}
};
inline void addHits_(AhoCorasickAmbiguous& fuzzyAC, const AhoCorasickAmbiguous::FuzzyACPattern& pattern, const AhoCorasickAmbiguous::PeptideDB& pep_DB, const String& prot, const String& full_prot, SignedSize idx_prot, Int offset, FoundProteinFunctor& func_threads) const
{
fuzzyAC.setProtein(prot);
while (fuzzyAC.findNext(pattern))
{
const seqan::Peptide& tmp_pep = pep_DB[fuzzyAC.getHitDBIndex()];
func_threads.addHit(fuzzyAC.getHitDBIndex(), idx_prot, length(tmp_pep), full_prot, fuzzyAC.getHitProteinPosition() + offset);
}
}
void updateMembers_() override;
String decoy_string_{};
bool prefix_{ false };
MissingDecoy missing_decoy_action_ = MissingDecoy::IS_ERROR;
String enzyme_name_{};
String enzyme_specificity_{};
bool write_protein_sequence_{ false };
bool write_protein_description_{ false };
bool keep_unreferenced_proteins_{ false };
Unmatched unmatched_action_ = Unmatched::IS_ERROR;
bool IL_equivalent_{ false };
Int aaa_max_{0};
Int mm_max_{0};
};
}
|
GB_unop__identity_int64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_uint64)
// op(A') function: GB (_unop_tran__identity_int64_uint64)
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
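// (0 here: the op is the identity, but it typecasts uint64_t to int64_t,
// so the GB_memcpy fast path in the apply kernel below cannot be used)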
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_uint64)
(
int64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ne_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint32)
// A*D function (colscale): GB (_AxD__ne_uint32)
// D*A function (rowscale): GB (_DxB__ne_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint32)
// C=scalar+B GB (_bind1st__ne_uint32)
// C=scalar+B' GB (_bind1st_tran__ne_uint32)
// C=A+scalar GB (_bind2nd__ne_uint32)
// C=A'+scalar GB (_bind2nd_tran__ne_uint32)
// C type: bool
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
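// (0 here: NE is commutative, so a flipped variant is never needed)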
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_uint64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint16
// op(A') function: GB_tran__abs_uint64_uint16
// C type: uint64_t
// A type: uint16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint64_uint16
(
uint64_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
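// Illustration (not part of the generated kernel): for this operator,
// GB_CAST_OP(p,p) in the loop above expands to the following
// straight-line code:
//
//      uint16_t aij = Ax [p] ;         // GB_GETA: load the input entry
//      uint64_t x = (uint64_t) aij ;   // GB_CASTING: typecast to uint64
//      Cx [p] = x ;                    // GB_OP: identity, since abs of an
//                                      // unsigned value is the value itself
//
// so "abs" on an unsigned type reduces to a widening typecast copy.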
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/memory_.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/resample.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImageChannel() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImageChannel method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const CompositeOperator compose,Image *source_image,
% const ssize_t x_offset,const ssize_t y_offset)
% MagickBooleanType CompositeImageChannel(Image *image,
% const ChannelType channel,const CompositeOperator compose,
% Image *source_image,const ssize_t x_offset,const ssize_t y_offset)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o channel: the channel.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o source_image: the composite (source) image.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'source_image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o "compose:outside-overlay"
% Modify how the composition is to affect areas not directly covered
% by the 'source_image' at the offset given. Normally this is
% dependent on the 'compose' method, especially Porter-Duff methods.
%
% If set to "false" then disable all normal handling of pixels not
% covered by the source_image. Typically used for repeated tiling
% of the source_image by the calling API.
%
% Prior to IM v6.5.3-3 this was called "modify-outside-overlay"
%
*/
/*
** Programmers notes on SVG specification.
**
** A Composition is defined by...
** Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
** Blending areas : X = 1 for area of overlap, i.e.: f(Sc,Dc)
** Y = 1 for source preserved
** Z = 1 for canvas preserved
**
** Conversion to transparency (then optimized)
** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
**
** Where...
** Sca = Sc*Sa normalized Source color multiplied by Source alpha
** Dca = Dc*Da normalized Dest color multiplied by Dest alpha
** Dc' = Dca'/Da' the desired color value for this channel.
**
** Da' appears in the formulas below as 'gamma', the resulting alpha value.
**
**
** Most functions use a blending mode of over (X=1,Y=1,Z=1)
** this results in the following optimizations...
** gamma = Sa+Da-Sa*Da;
** gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
**
** The above SVG definitions also dictate that Mathematical Composition
** methods should use an 'Over' blending mode for the Alpha Channel.
** It was, however, not applied for the composition modes 'Plus', 'Minus',
** and the modulus versions of 'Add' and 'Subtract'.
**
**
** Mathematical operator changes to be applied from IM v6.7...
**
** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
** 'ModulusAdd' and 'ModulusSubtract' for clarity.
**
** 2/ All mathematical compositions work as per the SVG specification
** with regard to blending. This now includes 'ModulusAdd' and
** 'ModulusSubtract'.
**
** 3/ When the special channel flag 'sync' (synchronize channel updates)
** is turned off (it is enabled by default), mathematical compositions are
** performed only on the channels specified, and are applied
** independently of each other. In other words, the mathematics is
** performed as 'pure' mathematical operations, rather than as image
** operations.
*/
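/*
** Illustrative sketch (not used by MagickCore): a worked instance of the
** formulas above, in normalized [0,1] values rather than QuantumRange-
** scaled ones, for the plain 'over' operator f(Sc,Dc) = Sc, whose
** premultiplied form simplifies to Dca' = Sca + Dca*(1-Sa).
*/
#if 0
static void OverBlendExample(void)
{
  double Sa = 0.6, Sc = 0.5;          /* source: 60% opaque mid-grey */
  double Da = 1.0, Dc = 1.0;          /* canvas: fully opaque white */
  double Sca = Sc*Sa, Dca = Dc*Da;    /* premultiplied colors: 0.3, 1.0 */
  double gamma = Sa+Da-Sa*Da;         /* Da' for X=Y=Z=1: here 1.0 */
  double Dca_new = Sca+Dca*(1.0-Sa);  /* Dca' for f(Sc,Dc) = Sc: 0.7 */
  double Dc_new = Dca_new/gamma;      /* un-premultiply: Dc' = 0.7 */
  (void) Dc_new;
}
#endif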
static inline MagickRealType Atop(const MagickRealType p,
const MagickRealType Sa,const MagickRealType q,
const MagickRealType magick_unused(Da))
{
magick_unreferenced(Da);
return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */
}
static inline void CompositeAtop(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */
composite->red=Atop(p->red,Sa,q->red,1.0);
composite->green=Atop(p->green,Sa,q->green,1.0);
composite->blue=Atop(p->blue,Sa,q->blue,1.0);
if (q->colorspace == CMYKColorspace)
composite->index=Atop(p->index,Sa,q->index,1.0);
}
/*
What is this composition method for? No specification could be found!
WARNING: this is not doing correct 'over' blend handling (Anthony Thyssen).
*/
static inline void CompositeBumpmap(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
intensity;
intensity=MagickPixelIntensity(p);
composite->red=QuantumScale*intensity*q->red;
composite->green=QuantumScale*intensity*q->green;
composite->blue=QuantumScale*intensity*q->blue;
composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity;
if (q->colorspace == CMYKColorspace)
composite->index=QuantumScale*intensity*q->index;
}
static inline void CompositeClear(const MagickPixelPacket *q,
MagickPixelPacket *composite)
{
composite->opacity=(MagickRealType) TransparentOpacity;
composite->red=0.0;
composite->green=0.0;
composite->blue=0.0;
if (q->colorspace == CMYKColorspace)
composite->index=0.0;
}
static MagickRealType ColorBurn(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
/*
Oct 2004 SVG specification.
*/
if (Sca*Da + Dca*Sa <= Sa*Da)
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa));
#else
/*
March 2009 SVG specification.
*/
if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon))
return(Sa*Da+Dca*(1.0-Sa));
if (Sca < MagickEpsilon)
return(Dca*(1.0-Sa));
return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}
static inline void CompositeColorBurn(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static MagickRealType ColorDodge(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
Oct 2004 SVG specification.
*/
if ((Sca*Da+Dca*Sa) >= Sa*Da)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#if 0
/*
New specification, March 2009 SVG specification. This specification was
also wrong for the non-overlap cases.
*/
if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
return(Sca*(1.0-Da));
if (fabs(Sca-Sa) < MagickEpsilon)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca)));
#endif
#if 0
/*
Working from first principles using the original formula:
f(Sc,Dc) = Dc/(1-Sc)
This works correctly! Looks like the 2004 model was right but just
required an extra condition for correct handling.
*/
if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
if (fabs(Sca-Sa) < MagickEpsilon)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}
static inline void CompositeColorDodge(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static inline MagickRealType Darken(const MagickRealType p,
const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
if (p < q)
return(MagickOver_(p,alpha,q,beta)); /* src-over */
return(MagickOver_(q,beta,p,alpha)); /* dst-over */
}
static inline void CompositeDarken(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
double
gamma;
if ( (channel & SyncChannels) != 0 ) {
composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
gamma=1.0-QuantumScale*composite->opacity;
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity);
composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity);
composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=MagickMax(p->opacity,q->opacity);
if ( (channel & RedChannel) != 0 )
composite->red=MagickMin(p->red,q->red);
if ( (channel & GreenChannel) != 0 )
composite->green=MagickMin(p->green,q->green);
if ( (channel & BlueChannel) != 0 )
composite->blue=MagickMin(p->blue,q->blue);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=MagickMin(p->index,q->index);
}
}
static inline void CompositeDarkenIntensity(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
/*
Select the pixel based on the intensity level.
If the 'Sync' flag is set, select the whole pixel based on alpha-weighted
intensity. Otherwise use intensity only, but restrict the copy according
to channel.
*/
if ( (channel & SyncChannels) != 0 ) {
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity;
Da=1.0-QuantumScale*q->opacity;
*composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q))
? *p : *q;
}
else {
int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q));
if ( (channel & AlphaChannel) != 0 )
composite->opacity = from_p ? p->opacity : q->opacity;
if ( (channel & RedChannel) != 0 )
composite->red = from_p ? p->red : q->red;
if ( (channel & GreenChannel) != 0 )
composite->green = from_p ? p->green : q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue = from_p ? p->blue : q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index = from_p ? p->index : q->index;
}
}
static inline MagickRealType Difference(const MagickRealType p,
const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
/* Optimized by Multiplying by QuantumRange (taken from gamma). */
return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q));
}
static inline void CompositeDifference(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
/* Values are not normalized as an optimization. */
composite->red=gamma*Difference(p->red,Sa,q->red,Da);
composite->green=gamma*Difference(p->green,Sa,q->green,Da);
composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Difference(p->index,Sa,q->index,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity));
if ( (channel & RedChannel) != 0 )
composite->red=fabs((double) (p->red-q->red));
if ( (channel & GreenChannel) != 0 )
composite->green=fabs((double) (p->green-q->green));
if ( (channel & BlueChannel) != 0 )
composite->blue=fabs((double) (p->blue-q->blue));
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=fabs((double) (p->index-q->index));
}
}
static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa,
const MagickRealType Dca,const MagickRealType Da)
{
/*
Divide Source by Destination
f(Sc,Dc) = Sc / Dc
But with appropriate handling for the special case of Dc == 0,
specifically so that f(Black,Black)=Black and f(non-Black,Black)=White.
It is, however, also important to correctly do 'over' alpha blending,
which is why the formula becomes so complex.
*/
if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
if (fabs(Dca) < MagickEpsilon)
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
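/*
  Worked example (illustrative only): for fully-opaque pixels
  (Sa == Da == 1) the blend terms vanish and the cases above reduce to
  the stated goals:
    f(Black,Black):      Sca=0.00, Dca=0.0  ->  0.0                (Black)
    f(non-Black,Black):  Sca=0.50, Dca=0.0  ->  Sa*Da = 1.0        (White)
    general case:        Sca=0.25, Dca=0.5  ->  Sca*Da*Da/Dca = 0.5 = Sc/Dc
*/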
static inline void CompositeDivide(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0));
if ( (channel & RedChannel) != 0 )
composite->red=QuantumRange*
Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumRange*
Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumRange*
Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumRange*
Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
}
}
static MagickRealType Exclusion(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeExclusion(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
gamma,
Sa,
Da;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ((channel & AlphaChannel) != 0)
composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0));
if ((channel & RedChannel) != 0)
composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0,
QuantumScale*q->red,1.0);
if ((channel & GreenChannel) != 0)
composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0,
QuantumScale*q->green,1.0);
if ((channel & BlueChannel) != 0)
composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0,
QuantumScale*q->blue,1.0);
if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace))
composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0,
QuantumScale*q->index,1.0);
}
}
static MagickRealType HardLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
if ((2.0*Sca) < Sa)
return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeHardLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static MagickRealType HardMix(const MagickRealType Sca,
const MagickRealType Dca)
{
if ((Sca+Dca) < QuantumRange)
return(0.0);
else
return(1.0);
}
static inline void CompositeHardMix(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*HardMix(p->red*Sa,q->red*Da);
composite->green=gamma*HardMix(p->green*Sa,q->green*Da);
composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*HardMix(p->index*Sa,q->index*Da);
}
static void HCLComposite(const double hue,const double chroma,const double luma,
MagickRealType *red,MagickRealType *green,MagickRealType *blue)
{
double
b,
c,
g,
h,
m,
r,
x;
/*
Convert HCL to RGB colorspace.
*/
assert(red != (MagickRealType *) NULL);
assert(green != (MagickRealType *) NULL);
assert(blue != (MagickRealType *) NULL);
h=6.0*hue;
c=chroma;
x=c*(1.0-fabs(fmod(h,2.0)-1.0));
r=0.0;
g=0.0;
b=0.0;
if ((0.0 <= h) && (h < 1.0))
{
r=c;
g=x;
}
else
if ((1.0 <= h) && (h < 2.0))
{
r=x;
g=c;
}
else
if ((2.0 <= h) && (h < 3.0))
{
g=c;
b=x;
}
else
if ((3.0 <= h) && (h < 4.0))
{
g=x;
b=c;
}
else
if ((4.0 <= h) && (h < 5.0))
{
r=x;
b=c;
}
else
if ((5.0 <= h) && (h < 6.0))
{
r=c;
b=x;
}
m=luma-(0.298839*r+0.586811*g+0.114350*b);
*red=QuantumRange*(r+m);
*green=QuantumRange*(g+m);
*blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
const MagickRealType blue,double *hue,double *chroma,double *luma)
{
double
b,
c,
g,
h,
max,
r;
/*
Convert RGB to HCL colorspace.
*/
assert(hue != (double *) NULL);
assert(chroma != (double *) NULL);
assert(luma != (double *) NULL);
r=(double) red;
g=(double) green;
b=(double) blue;
max=MagickMax(r,MagickMax(g,b));
c=max-(double) MagickMin(r,MagickMin(g,b));
h=0.0;
if (c == 0)
h=0.0;
else
if (red == (MagickRealType) max)
h=fmod((g-b)/c+6.0,6.0);
else
if (green == (MagickRealType) max)
h=((b-r)/c)+2.0;
else
if (blue == (MagickRealType) max)
h=((r-g)/c)+4.0;
*hue=(h/6.0);
*chroma=QuantumScale*c;
*luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
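/*
  Illustrative sketch (not used by MagickCore): the two helpers above form
  a round trip, so converting a color to HCL and straight back recovers
  the original RGB triple (up to floating-point error).
*/
#if 0
static void HCLRoundTripExample(void)
{
  double chroma, hue, luma;
  MagickRealType blue, green, red;
  red=0.75*QuantumRange;
  green=0.25*QuantumRange;
  blue=0.0;
  CompositeHCL(red,green,blue,&hue,&chroma,&luma);  /* RGB -> HCL */
  HCLComposite(hue,chroma,luma,&red,&green,&blue);  /* HCL -> RGB */
}
#endif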
static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa,
const MagickRealType magick_unused(q),const MagickRealType Da)
{
magick_unreferenced(q);
return(Sa*p*Da);
}
static inline void CompositeIn(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Sa,
Da;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=Sa*Da;
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*In(p->red,Sa,q->red,Da);
composite->green=gamma*In(p->green,Sa,q->green,Da);
composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*In(p->index,Sa,q->index,Da);
}
static inline MagickRealType Lighten(const MagickRealType p,
const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
if (p > q)
return(MagickOver_(p,alpha,q,beta)); /* src-over */
return(MagickOver_(q,beta,p,alpha)); /* dst-over */
}
static inline void CompositeLighten(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
/*
Lighten is also equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
double
gamma;
if ( (channel & SyncChannels) != 0 ) {
composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
gamma=1.0-QuantumScale*composite->opacity;
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=MagickMin(p->opacity,q->opacity);
if ( (channel & RedChannel) != 0 )
composite->red=MagickMax(p->red,q->red);
if ( (channel & GreenChannel) != 0 )
composite->green=MagickMax(p->green,q->green);
if ( (channel & BlueChannel) != 0 )
composite->blue=MagickMax(p->blue,q->blue);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=MagickMax(p->index,q->index);
}
}
static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
/*
Select the pixel based on the intensity level.
If the 'Sync' flag is set, select the whole pixel based on alpha-weighted
intensity. Otherwise use intensity only, but restrict the copy according
to channel.
*/
if ( (channel & SyncChannels) != 0 ) {
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity;
Da=1.0-QuantumScale*q->opacity;
*composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q))
? *p : *q;
}
else {
int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));
if ( (channel & AlphaChannel) != 0 )
composite->opacity = from_p ? p->opacity : q->opacity;
if ( (channel & RedChannel) != 0 )
composite->red = from_p ? p->red : q->red;
if ( (channel & GreenChannel) != 0 )
composite->green = from_p ? p->green : q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue = from_p ? p->blue : q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index = from_p ? p->index : q->index;
}
}
#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
LinearDodge: simplifies to a trivial formula
f(Sc,Dc) = Sc + Dc
Dca' = Sca + Dca
*/
return(Sca+Dca);
}
#endif
static inline void CompositeLinearDodge(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*(p->red*Sa+q->red*Da);
composite->green=gamma*(p->green*Sa+q->green*Da);
composite->blue=gamma*(p->blue*Sa+q->blue*Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*(p->index*Sa+q->index*Da);
}
static inline MagickRealType LinearBurn(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
return(Sca+Dca-Sa*Da);
}
static inline void CompositeLinearBurn(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static inline MagickRealType LinearLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
/*
The previous formula was only valid for fully-opaque images.
*/
return(Dca+2*Sca-1.0);
#else
/*
LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
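/*
  Derivation (illustrative only): substituting f into the SVG transparency
  formula Dca' = Sa*Da*f(Sc,Dc)+Sca*(1-Da)+Dca*(1-Sa), with Sc = Sca/Sa
  and Dc = Dca/Da, gives
    Dca' = Sa*Dca+2*Sca*Da-Sa*Da+Sca-Sca*Da+Dca-Sa*Dca
         = (Sca-Sa)*Da+Sca+Dca
  which is the expression returned below.
*/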
return((Sca-Sa)*Da+Sca+Dca);
#endif
}
static inline void CompositeLinearLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static inline MagickRealType Mathematics(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
const GeometryInfo *geometry_info)
{
/*
'Mathematics', a free-form user-controlled mathematical composition, is
defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite as
a comma-separated 'geometry' string in the "compose:args" image artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
Dca*(1.0-Sa));
}
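/*
  Illustration (not used by MagickCore): a few argument sets and the
  blends they produce, assuming ParseGeometry() fills rho,sigma,xi,psi
  from the four "compose:args" numbers in order (A,B,C,D):
    "0,1,1,0"   f(Sc,Dc) = Sc + Dc   (a LinearDodge-style add)
    "0,1,-1,0"  f(Sc,Dc) = Sc - Dc   (a subtract)
    "1,0,0,0"   f(Sc,Dc) = Sc * Dc   (a multiply)
*/
#if 0
static void MathematicsExample(void)
{
  GeometryInfo args;
  args.rho=0.0;    /* A */
  args.sigma=1.0;  /* B */
  args.xi=1.0;     /* C */
  args.psi=0.0;    /* D */
  /* fully-opaque add: f(0.25,0.5) = 0.75 */
  (void) Mathematics(0.25,1.0,0.5,1.0,&args);
}
#endif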
static inline void CompositeMathematics(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel, const GeometryInfo
*args, MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da,args);
composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da,args);
composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da,args);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da,args);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
if ( (channel & RedChannel) != 0 )
composite->red=QuantumRange*
Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumRange*
Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumRange*
Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumRange*
Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
}
}
static inline void CompositePlus(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
if ( (channel & SyncChannels) != 0 ) {
/*
NOTE: "Plus" does not use 'over' alpha-blending but uses a
special 'plus' form of alph-blending. It is the ONLY mathematical
operator to do this. this is what makes it different to the
otherwise equivalent "LinearDodge" composition method.
Note however that color channels are still effected by the alpha channel
as a result of the blending, making it just as useless for independant
channel maths, just like all other mathematical composition methods.
As such the removal of the 'sync' flag, is still a usful convention.
The MagickPixelCompositePlus() function is defined in
"composite-private.h" so it can also be used for Image Blending.
*/
MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=p->opacity+q->opacity-QuantumRange;
if ( (channel & RedChannel) != 0 )
composite->red=p->red+q->red;
if ( (channel & GreenChannel) != 0 )
composite->green=p->green+q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue=p->blue+q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=p->index+q->index;
}
}
static inline MagickRealType Minus(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,
const MagickRealType magick_unused(Da))
{
/*
Minus: subtract the Destination from the Source
f(Sc,Dc) = Sc - Dc
*/
magick_unreferenced(Da);
return(Sca+Dca-2*Dca*Sa);
}
static inline void CompositeMinus(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-(Sa-Da));
if ( (channel & RedChannel) != 0 )
composite->red=p->red-q->red;
if ( (channel & GreenChannel) != 0 )
composite->green=p->green-q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue=p->blue-q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=p->index-q->index;
}
}
static inline MagickRealType ModulusAdd(const MagickRealType p,
const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
MagickRealType
pixel;
pixel=p+q;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}
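/*
  Worked example (illustrative only): for fully-opaque pixels
  (Sa == Da == 1) the blend terms vanish and only the modulus wrap
  remains; with a QuantumRange of 255 (e.g., a Q8 build),
    ModulusAdd(200,1.0,100,1.0)  ->  300-255 = 45
*/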
static inline void CompositeModulusAdd(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
if ( (channel & SyncChannels) != 0 ) {
double
gamma;
MagickRealType
Sa,
Da;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=ModulusAdd(p->red,Sa,q->red,Da);
composite->green=ModulusAdd(p->green,Sa,q->green,Da);
composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=ModulusAdd(p->index,Sa,q->index,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
1.0,QuantumRange-q->opacity,1.0);
if ( (channel & RedChannel) != 0 )
composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
}
}
static inline MagickRealType ModulusSubtract(const MagickRealType p,
const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
MagickRealType
pixel;
pixel=p-q;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}
static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
if ( (channel & SyncChannels) != 0 ) {
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma = RoundToUnity(Sa+Da-Sa*Da);
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
1.0,QuantumRange-q->opacity,1.0);
if ( (channel & RedChannel) != 0 )
composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
if ( (channel & GreenChannel) != 0 )
composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
if ( (channel & BlueChannel) != 0 )
composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
}
}
static inline MagickRealType Multiply(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeMultiply(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Sa*Da);
if ( (channel & RedChannel) != 0 )
composite->red=QuantumScale*p->red*q->red;
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumScale*p->green*q->green;
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumScale*p->blue*q->blue;
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumScale*p->index*q->index;
}
}
static inline MagickRealType Out(const MagickRealType p,
const MagickRealType Sa,const MagickRealType magick_unused(q),
const MagickRealType Da)
{
magick_unreferenced(q);
return(Sa*p*(1.0-Da));
}
static inline void CompositeOut(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=Sa*(1.0-Da);
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*Out(p->red,Sa,q->red,Da);
composite->green=gamma*Out(p->green,Sa,q->green,Da);
composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Out(p->index,Sa,q->index,Da);
}
static MagickRealType PegtopLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
PegTop: A Soft-Light alternative: A continuous version of the Softlight
function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs(Da) < MagickEpsilon)
return(Sca);
return(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositePegtopLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static MagickRealType PinLight(const MagickRealType Sca,
const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if (Dca*Sa < Da*(2*Sca-Sa))
return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
if ((Dca*Sa) > (2*Sca*Da))
return(Sca*Da+Sca+Dca*(1.0-Sa));
return(Sca*(1.0-Da)+Dca);
}
static inline void CompositePinLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static inline MagickRealType Screen(const MagickRealType Sca,
const MagickRealType Dca)
{
/* Screen: A negated multiply
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
return(Sca+Dca-Sca*Dca);
}
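/*
  Illustration: expanding the negated multiply shows it equals the
  premultiplied expression returned above:
    1-(1-Sc)*(1-Dc) = 1-(1-Sc-Dc+Sc*Dc) = Sc+Dc-Sc*Dc
  which is why Screen, unlike most blend modes, needs no explicit Sa or
  Da terms.
*/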
static inline void CompositeScreen(const MagickPixelPacket *p,
const MagickPixelPacket *q,const ChannelType channel,
MagickPixelPacket *composite)
{
double
gamma;
MagickRealType
Da,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
if ( (channel & SyncChannels) != 0 ) {
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
Sa*=(MagickRealType) QuantumScale;
Da*=(MagickRealType) QuantumScale; /* optimization */
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*Screen(p->red*Sa,q->red*Da);
composite->green=gamma*Screen(p->green*Sa,q->green*Da);
composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Screen(p->index*Sa,q->index*Da);
}
else { /* handle channels as separate grayscale channels */
if ( (channel & AlphaChannel) != 0 )
composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
if ( (channel & RedChannel) != 0 )
composite->red=QuantumRange*Screen(QuantumScale*p->red,
QuantumScale*q->red);
if ( (channel & GreenChannel) != 0 )
composite->green=QuantumRange*Screen(QuantumScale*p->green,
QuantumScale*q->green);
if ( (channel & BlueChannel) != 0 )
composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
QuantumScale*q->blue);
if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
composite->index=QuantumRange*Screen(QuantumScale*p->index,
QuantumScale*q->index);
}
}
static MagickRealType SoftLight(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
#if 0
/*
Oct 2004 SVG specification -- was found to be incorrect
See http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
*/
if (2.0*Sca < Sa)
return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
if (8.0*Dca <= Da)
return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
Dca*(1.0-Sa));
#else
MagickRealType
alpha,
beta;
/*
New specification: March 2009 SVG specification.
*/
alpha=Dca/Da;
if ((2.0*Sca) < Sa)
return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
return(beta);
}
beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
return(beta);
#endif
}
static inline void CompositeSoftLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
/*
Deprecated.
Multiply the difference by 'amount' if the difference is larger than
'threshold'??? What use this is, is completely unknown.
The Opacity calculation appears to be inverted -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
const MagickRealType q,const MagickRealType threshold,
const MagickRealType amount)
{
MagickRealType
delta;
delta=p-q;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
return(q);
return(q+delta*amount);
}
static inline void CompositeThreshold(const MagickPixelPacket *p,
const MagickPixelPacket *q,const MagickRealType threshold,
const MagickRealType amount,MagickPixelPacket *composite)
{
composite->red=Threshold(p->red,q->red,threshold,amount);
composite->green=Threshold(p->green,q->green,threshold,amount);
composite->blue=Threshold(p->blue,q->blue,threshold,amount);
composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
threshold,amount);
if (q->colorspace == CMYKColorspace)
composite->index=Threshold(p->index,q->index,threshold,amount);
}
static MagickRealType VividLight(const MagickRealType Sca,
const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
if ((2*Sca) <= Sa)
return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeVividLight(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
q->red*Da,Da);
composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
q->green*Da,Da);
composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
q->index*Da,Da);
}
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
const MagickRealType Dca,const MagickRealType Da)
{
return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}
static inline void CompositeXor(const MagickPixelPacket *p,
const MagickPixelPacket *q,MagickPixelPacket *composite)
{
MagickRealType
Da,
gamma,
Sa;
Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */
Da=1.0-QuantumScale*q->opacity;
gamma=Sa+Da-2*Sa*Da; /* Xor blend mode X=0,Y=1,Z=1 */
composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
if (q->colorspace == CMYKColorspace)
composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const CompositeOperator compose,const Image *source_image,
const ssize_t x_offset,const ssize_t y_offset)
{
MagickBooleanType
status;
status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
x_offset,y_offset);
return(status);
}
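/*
  Illustrative sketch (assumes 'canvas' and 'overlay' were already read
  via the MagickCore API): 'over' composition at an offset, equivalent to
  calling CompositeImageChannel() with DefaultChannels as the wrapper
  above does.
*/
#if 0
static MagickBooleanType OverlayExample(Image *canvas,Image *overlay)
{
  return(CompositeImage(canvas,OverCompositeOp,overlay,10,20));
}
#endif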
MagickExport MagickBooleanType CompositeImageChannel(Image *image,
const ChannelType channel,const CompositeOperator compose,
const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
ExceptionInfo
*exception;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
clip_to_self,
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
/*
Prepare composite image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
exception=(&image->exception);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace);
(void) SetImageColorspace(source_image,image->colorspace);
GetMagickPixelPacket(image,&zero);
canvas_image=(Image *) NULL;
amount=0.5;
canvas_dissolve=1.0;
clip_to_self=MagickTrue;
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case ClearCompositeOp:
case SrcCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
case OutCompositeOp:
case SrcOutCompositeOp:
case DstInCompositeOp:
case DstAtopCompositeOp:
{
/*
Modify canvas outside the overlaid region.
*/
clip_to_self=MagickFalse;
break;
}
case OverCompositeOp:
{
if (image->matte != MagickFalse)
break;
if (source_image->matte != MagickFalse)
break;
}
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*source_indexes;
register const PixelPacket
*p;
register IndexPacket
*indexes;
register PixelPacket
*q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
1,exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
source_indexes=GetCacheViewVirtualIndexQueue(source_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
(void) CopyMagickMemory(q,p,source_image->columns*sizeof(*p));
if ((indexes != (IndexPacket *) NULL) &&
(source_indexes != (const IndexPacket *) NULL))
(void) CopyMagickMemory(indexes,source_indexes,
source_image->columns*sizeof(*indexes));
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyOpacityCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify the canvas outside the overlaid region, and require an alpha
channel to exist so that transparency can be added.
*/
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
clip_to_self=MagickFalse;
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view,
*source_view;
MagickPixelPacket
pixel;
MagickRealType
angle_range,
angle_start,
height,
width;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur the image by resampling, as dictated by an overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from the user.
*/
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
The user's input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5, so to match the user's input the
ellipse size needs to be doubled.
*/
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
/* default the unrotated ellipse width and height axis vectors */
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
/* Otherwise let's set an angle range and calculate it in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a Gaussian cylindrical filter for EWA blurring.
Since the minimum ellipse radius is support*1.0, the EWA algorithm
can only produce a minimum blur of 0.5 for a Gaussian (support=2.0).
This means that even 'No Blur' will still be a little blurry!
The solution (which also prevents any expert user filter settings
from interfering) is to set our own filter settings here, then
restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter,1.0);
/* do the variable blurring of each pixel in image */
pixel=zero;
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*magick_restrict p;
register PixelPacket
*magick_restrict r;
register IndexPacket
*magick_restrict canvas_indexes;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
1,exception);
r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
1,exception);
if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
break;
canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p++;
continue;
}
if (fabs(angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",
blur.x1, blur.x2, blur.y1, blur.y2);
fprintf(stderr, "scaled by=%lf,%lf\n",
QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p));
}
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(p),
blur.y1*QuantumScale*GetPixelGreen(p),
blur.x2*QuantumScale*GetPixelRed(p),
blur.y2*QuantumScale*GetPixelGreen(p));
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,(double)
y_offset+y,&pixel);
SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
p++;
r++;
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
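/*
  Illustrative sketch (hypothetical artifact values, not part of the
  original source): a variable blur of up to sigma 2.5, driven by the
  overlay's red/green channels, would be requested before compositing as:

    (void) SetImageArtifact(image,"compose:args","2.5");
    (void) CompositeImageChannel(image,DefaultChannels,BlurCompositeOp,
      overlay,0,0);
*/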
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view,
*source_view,
*image_view;
MagickPixelPacket
pixel;
MagickRealType
horizontal_scale,
vertical_scale;
PointInfo
center,
offset;
register IndexPacket
*magick_restrict canvas_indexes;
register PixelPacket
*magick_restrict r;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine the fixed center point for the absolute distortion map.
Absolute distort ==
  displace offset relative to a fixed absolute point.
Select that point according to the +X+Y user inputs:
  default      = center of the overlay image
  arg flag '!' = locations/percentages relative to the background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=((MagickRealType) image->columns-1)/2.0;
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) == 0)
center.x=(MagickRealType) (x_offset+geometry_info.xi);
else
center.x=geometry_info.xi;
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=((MagickRealType) image->rows-1)/2.0;
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided
displacement/distortion map -- like a lens...
*/
pixel=zero;
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
1,exception);
r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
1,exception);
if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
break;
canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p++;
continue;
}
/*
Displace the offset.
*/
offset.x=(double) ((horizontal_scale*(GetPixelRed(p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0));
offset.y=(double) ((vertical_scale*(GetPixelGreen(p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0));
(void) InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale*
pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p)));
SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
p++;
r++;
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Convert the geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
clip_to_self=MagickFalse;
if ((canvas_dissolve+MagickEpsilon) > 1.0 )
{
canvas_dissolve=1.0;
clip_to_self=MagickTrue;
}
}
break;
}
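/*
  Illustrative sketch (hypothetical values, not part of the original
  source): a 40% source dissolve over a fully opaque canvas would be
  requested with:

    (void) SetImageArtifact(image,"compose:args","40");
    (void) CompositeImageChannel(image,DefaultChannels,DissolveCompositeOp,
      overlay,0,0);
*/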
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
clip_to_self=MagickFalse;
if ((canvas_dissolve+MagickEpsilon) > 1.0)
clip_to_self=MagickTrue;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from the "compose:args" setting.
Unused values are set to zero automagically.
Arguments are normally a comma-separated list, so this probably should
be changed to some 'general comma list' parser (with a minimum
number of values).
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
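/*
  Illustrative sketch (hypothetical values): the four geometry values are
  treated as the coefficients a,b,c,d of f(Sc,Dc)=a*Sc*Dc+b*Sc+c*Dc+d, so
  a 50/50 average of source and canvas would be requested with:

    (void) SetImageArtifact(image,"compose:args","0,0.5,0.5,0");
    (void) CompositeImageChannel(image,DefaultChannels,
      MathematicsCompositeOp,overlay,0,0);
*/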
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
This composition method is deprecated.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
value=GetImageArtifact(image,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsMagickTrue(value);
/*
Composite image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
status=AccelerateCompositeImage(image,channel,compose,source_image,
x_offset,y_offset,canvas_dissolve,source_dissolve,exception);
if (status != MagickFalse)
return(status);
#endif
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
GetMagickPixelPacket(source_image,&zero);
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*pixels;
double
luma,
hue,
chroma,
sans;
MagickPixelPacket
composite,
canvas,
source;
register const IndexPacket
*magick_restrict source_indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside the overlay region.
*/
pixels=(PixelPacket *) NULL;
p=(PixelPacket *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset;
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
source_indexes=GetCacheViewVirtualIndexQueue(source_view);
GetMagickPixelPacket(source_image,&source);
GetMagickPixelPacket(image,&canvas);
hue=0.0;
chroma=0.0;
luma=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q++;
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
canvas.red=(MagickRealType) GetPixelRed(q);
canvas.green=(MagickRealType) GetPixelGreen(q);
canvas.blue=(MagickRealType) GetPixelBlue(q);
if (image->matte != MagickFalse)
canvas.opacity=(MagickRealType) GetPixelOpacity(q);
if (image->colorspace == CMYKColorspace)
canvas.index=(MagickRealType) GetPixelIndex(indexes+x);
if (image->colorspace == CMYKColorspace)
{
canvas.red=(MagickRealType) QuantumRange-canvas.red;
canvas.green=(MagickRealType) QuantumRange-canvas.green;
canvas.blue=(MagickRealType) QuantumRange-canvas.blue;
canvas.index=(MagickRealType) QuantumRange-canvas.index;
}
/*
Handle canvas modifications outside overlaid region.
*/
composite=canvas;
if ((pixels == (PixelPacket *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
switch (compose)
{
case DissolveCompositeOp:
case BlendCompositeOp:
{
composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve*
(QuantumRange-composite.opacity));
break;
}
case ClearCompositeOp:
case SrcCompositeOp:
{
CompositeClear(&canvas,&composite);
break;
}
case InCompositeOp:
case SrcInCompositeOp:
case OutCompositeOp:
case SrcOutCompositeOp:
case DstInCompositeOp:
case DstAtopCompositeOp:
case CopyOpacityCompositeOp:
case ChangeMaskCompositeOp:
{
composite.opacity=(MagickRealType) TransparentOpacity;
break;
}
default:
{
(void) GetOneVirtualMagickPixel(source_image,x-x_offset,
y-y_offset,&composite,exception);
break;
}
}
if (image->colorspace == CMYKColorspace)
{
composite.red=(MagickRealType) QuantumRange-composite.red;
composite.green=(MagickRealType) QuantumRange-composite.green;
composite.blue=(MagickRealType) QuantumRange-composite.blue;
composite.index=(MagickRealType) QuantumRange-composite.index;
}
SetPixelRed(q,clamp != MagickFalse ?
ClampPixel(composite.red) : ClampToQuantum(composite.red));
SetPixelGreen(q,clamp != MagickFalse ?
ClampPixel(composite.green) : ClampToQuantum(composite.green));
SetPixelBlue(q,clamp != MagickFalse ?
ClampPixel(composite.blue) : ClampToQuantum(composite.blue));
if (image->matte != MagickFalse)
SetPixelOpacity(q,clamp != MagickFalse ?
ClampPixel(composite.opacity) :
ClampToQuantum(composite.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,clamp != MagickFalse ?
ClampPixel(composite.index) : ClampToQuantum(composite.index));
q++;
continue;
}
/*
Handle normal overlay of source onto canvas.
*/
source.red=(MagickRealType) GetPixelRed(p);
source.green=(MagickRealType) GetPixelGreen(p);
source.blue=(MagickRealType) GetPixelBlue(p);
if (source_image->matte != MagickFalse)
source.opacity=(MagickRealType) GetPixelOpacity(p);
if (source_image->colorspace == CMYKColorspace)
source.index=(MagickRealType) GetPixelIndex(source_indexes+
x-x_offset);
if (source_image->colorspace == CMYKColorspace)
{
source.red=(MagickRealType) QuantumRange-source.red;
source.green=(MagickRealType) QuantumRange-source.green;
source.blue=(MagickRealType) QuantumRange-source.blue;
source.index=(MagickRealType) QuantumRange-source.index;
}
switch (compose)
{
/* Porter-Duff Compositions */
case ClearCompositeOp:
{
CompositeClear(&canvas,&composite);
break;
}
case SrcCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
{
composite=source;
break;
}
case NoCompositeOp:
case DstCompositeOp:
break;
case OverCompositeOp:
case SrcOverCompositeOp:
{
MagickPixelCompositeOver(&source,source.opacity,&canvas,
canvas.opacity,&composite);
break;
}
case DstOverCompositeOp:
{
MagickPixelCompositeOver(&canvas,canvas.opacity,&source,
source.opacity,&composite);
break;
}
case SrcInCompositeOp:
case InCompositeOp:
{
CompositeIn(&source,&canvas,&composite);
break;
}
case DstInCompositeOp:
{
CompositeIn(&canvas,&source,&composite);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
CompositeOut(&source,&canvas,&composite);
break;
}
case DstOutCompositeOp:
{
CompositeOut(&canvas,&source,&composite);
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
CompositeAtop(&source,&canvas,&composite);
break;
}
case DstAtopCompositeOp:
{
CompositeAtop(&canvas,&source,&composite);
break;
}
case XorCompositeOp:
{
CompositeXor(&source,&canvas,&composite);
break;
}
/* Mathematical Compositions */
case PlusCompositeOp:
{
CompositePlus(&source,&canvas,channel,&composite);
break;
}
case MinusDstCompositeOp:
{
CompositeMinus(&source,&canvas,channel,&composite);
break;
}
case MinusSrcCompositeOp:
{
CompositeMinus(&canvas,&source,channel,&composite);
break;
}
case ModulusAddCompositeOp:
{
CompositeModulusAdd(&source,&canvas,channel,&composite);
break;
}
case ModulusSubtractCompositeOp:
{
CompositeModulusSubtract(&source,&canvas,channel,&composite);
break;
}
case DifferenceCompositeOp:
{
CompositeDifference(&source,&canvas,channel,&composite);
break;
}
case ExclusionCompositeOp:
{
CompositeExclusion(&source,&canvas,channel,&composite);
break;
}
case MultiplyCompositeOp:
{
CompositeMultiply(&source,&canvas,channel,&composite);
break;
}
case ScreenCompositeOp:
{
CompositeScreen(&source,&canvas,channel,&composite);
break;
}
case DivideDstCompositeOp:
{
CompositeDivide(&source,&canvas,channel,&composite);
break;
}
case DivideSrcCompositeOp:
{
CompositeDivide(&canvas,&source,channel,&composite);
break;
}
case DarkenCompositeOp:
{
CompositeDarken(&source,&canvas,channel,&composite);
break;
}
case LightenCompositeOp:
{
CompositeLighten(&source,&canvas,channel,&composite);
break;
}
case DarkenIntensityCompositeOp:
{
CompositeDarkenIntensity(&source,&canvas,channel,&composite);
break;
}
case LightenIntensityCompositeOp:
{
CompositeLightenIntensity(&source,&canvas,channel,&composite);
break;
}
case MathematicsCompositeOp:
{
CompositeMathematics(&source,&canvas,channel,&geometry_info,
&composite);
break;
}
/* Lighting Compositions */
case ColorDodgeCompositeOp:
{
CompositeColorDodge(&source,&canvas,&composite);
break;
}
case ColorBurnCompositeOp:
{
CompositeColorBurn(&source,&canvas,&composite);
break;
}
case LinearDodgeCompositeOp:
{
CompositeLinearDodge(&source,&canvas,&composite);
break;
}
case LinearBurnCompositeOp:
{
CompositeLinearBurn(&source,&canvas,&composite);
break;
}
case HardLightCompositeOp:
{
CompositeHardLight(&source,&canvas,&composite);
break;
}
case HardMixCompositeOp:
{
CompositeHardMix(&source,&canvas,&composite);
break;
}
case OverlayCompositeOp:
{
/* Overlay = Reversed HardLight. */
CompositeHardLight(&canvas,&source,&composite);
break;
}
case SoftLightCompositeOp:
{
CompositeSoftLight(&source,&canvas,&composite);
break;
}
case LinearLightCompositeOp:
{
CompositeLinearLight(&source,&canvas,&composite);
break;
}
case PegtopLightCompositeOp:
{
CompositePegtopLight(&source,&canvas,&composite);
break;
}
case VividLightCompositeOp:
{
CompositeVividLight(&source,&canvas,&composite);
break;
}
case PinLightCompositeOp:
{
CompositePinLight(&source,&canvas,&composite);
break;
}
/* Other Composition */
case ChangeMaskCompositeOp:
{
if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) ||
(IsMagickColorSimilar(&source,&canvas) != MagickFalse))
composite.opacity=(MagickRealType) TransparentOpacity;
else
composite.opacity=(MagickRealType) OpaqueOpacity;
break;
}
case BumpmapCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
CompositeBumpmap(&source,&canvas,&composite);
break;
}
case DissolveCompositeOp:
{
MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange-
source_dissolve*(QuantumRange-source.opacity)),&canvas,
(MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange-
canvas.opacity)),&composite);
break;
}
case BlendCompositeOp:
{
MagickPixelCompositeBlend(&source,source_dissolve,&canvas,
canvas_dissolve,&composite);
break;
}
case StereoCompositeOp:
{
canvas.red=(MagickRealType) GetPixelRed(p);
break;
}
case ThresholdCompositeOp:
{
CompositeThreshold(&source,&canvas,threshold,amount,&composite);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (source.opacity == TransparentOpacity)
break;
offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint);
if (offset == 0)
break;
CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&composite.red,&composite.green,
&composite.blue);
break;
}
case HueCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (canvas.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
&chroma,&luma);
CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < canvas.opacity)
composite.opacity=source.opacity;
break;
}
case SaturateCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (canvas.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
&chroma,&luma);
CompositeHCL(source.red,source.green,source.blue,&sans,&chroma,
&sans);
HCLComposite(hue,chroma,luma,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < canvas.opacity)
composite.opacity=source.opacity;
break;
}
case LuminizeCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (canvas.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
&chroma,&luma);
CompositeHCL(source.red,source.green,source.blue,&sans,&sans,
&luma);
HCLComposite(hue,chroma,luma,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < canvas.opacity)
composite.opacity=source.opacity;
break;
}
case ColorizeCompositeOp:
{
if (source.opacity == TransparentOpacity)
break;
if (canvas.opacity == TransparentOpacity)
{
composite=source;
break;
}
CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans,
&sans,&luma);
CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&composite.red,
&composite.green,&composite.blue);
if (source.opacity < canvas.opacity)
composite.opacity=source.opacity;
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
composite.red=source.red;
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
composite.green=source.green;
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
composite.blue=source.blue;
break;
}
case CopyOpacityCompositeOp:
{
if (source.matte == MagickFalse)
composite.opacity=(MagickRealType) (QuantumRange-
MagickPixelIntensityToQuantum(&source));
else
composite.opacity=source.opacity;
break;
}
case CopyBlackCompositeOp:
{
if (source.colorspace != CMYKColorspace)
ConvertRGBToCMYK(&source);
composite.index=QuantumRange-source.index;
break;
}
/* compose methods that are already handled */
case BlurCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
{
composite=source;
break;
}
default:
break;
}
if (image->colorspace == CMYKColorspace)
{
composite.red=(MagickRealType) QuantumRange-composite.red;
composite.green=(MagickRealType) QuantumRange-composite.green;
composite.blue=(MagickRealType) QuantumRange-composite.blue;
composite.index=(MagickRealType) QuantumRange-composite.index;
}
SetPixelRed(q,clamp != MagickFalse ?
ClampPixel(composite.red) : ClampToQuantum(composite.red));
SetPixelGreen(q,clamp != MagickFalse ?
ClampPixel(composite.green) : ClampToQuantum(composite.green));
SetPixelBlue(q,clamp != MagickFalse ?
ClampPixel(composite.blue) : ClampToQuantum(composite.blue));
SetPixelOpacity(q,clamp != MagickFalse ?
ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(indexes+x,clamp != MagickFalse ?
ClampPixel(composite.index) : ClampToQuantum(composite.index));
p++;
if (p >= (pixels+source_image->columns))
p=pixels;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImageChannel)
#endif
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image *) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
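%  For example (an illustrative sketch; `canvas` and `texture` are assumed
%  to be Image pointers read elsewhere):
%
%    (void) TextureImage(canvas,texture);
%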
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
ExceptionInfo
*exception;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
exception=(&image->exception);
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
status=MagickTrue;
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
(texture_image->matte != MagickFalse)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
thread_status=CompositeImage(image,image->compose,texture_image,x+
texture_image->tile_offset.x,y+texture_image->tile_offset.y);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized).
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,texture_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*texture_indexes;
register const PixelPacket
*p;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*q;
size_t
width;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
(void) CopyMagickMemory(q,p,width*sizeof(*p));
if ((image->colorspace == CMYKColorspace) &&
(texture_image->colorspace == CMYKColorspace))
{
(void) CopyMagickMemory(indexes,texture_indexes,width*
sizeof(*indexes));
indexes+=width;
}
q+=width;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TextureImage)
#endif
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
|
tinyexr.h | /*
Copyright (c) 2014 - 2018, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
#ifndef TINYEXR_H_
#define TINYEXR_H_
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // assume stdint.h is available (C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz to decode ZIP-compressed pixel data. Linking with zlib
// is required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
typedef struct _EXRVersion {
int version; // this must be 2
int tiled; // tile format image
int long_name; // long name attribute
int non_image; // deep image(EXR 2.0)
int multipart; // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
typedef struct _EXRTile {
int offset_x;
int offset_y;
int level_x;
int level_y;
int width; // actual width in a tile.
int height; // actual height in a tile.
unsigned char **images; // image[channels][pixels]
} EXRTile;
typedef struct _EXRHeader {
float pixel_aspect_ratio;
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
int chunk_count;
// Properties for the tiled format (`tiledesc`).
int tiled;
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
int long_name;
int non_image;
int multipart;
unsigned int header_len;
// Custom attributes (excludes required attributes, e.g. `channels`,
// `compression`, etc.)
int num_custom_attributes;
EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];
EXRChannelInfo *channels; // [num_channels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel. This is overwritten with `requested_pixel_types` when
// loading.
int num_channels;
int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
typedef struct _EXRImage {
EXRTile *tiles; // Tiled pixel data. The application must reconstruct image
// from tiles manually. NULL if scanline format.
unsigned char **images; // image[channels][pixels]. NULL if tiled format.
int width;
int height;
int num_channels;
// Properties for tile format.
int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A (single-channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
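// Illustrative usage sketch (not part of the original header; "input.exr"
// is a hypothetical file name):
//
//   float *rgba = NULL;
//   int width, height;
//   const char *err = NULL;
//   if (LoadEXR(&rgba, &width, &height, "input.exr", &err) ==
//       TINYEXR_SUCCESS) {
//     // ... use the width x height x RGBA float pixels ...
//     free(rgba);
//   } else if (err) {
//     fprintf(stderr, "LoadEXR: %s\n", err);
//   }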
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`.
// Save image as fp16 (HALF) format when `save_as_fp16` is a positive
// non-zero value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename);
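// Illustrative usage sketch (`rgba` is a hypothetical width x height x 4
// float buffer):
//
//   int ret = SaveEXR(rgba, width, height, 4 /* RGBA */,
//                     1 /* save_as_fp16 */, "out.exr");
//   if (ret != TINYEXR_SUCCESS) { /* handle error */ }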
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of the EXRHeader struct.
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of the EXRImage struct.
extern int FreeEXRImage(EXRImage *exr_image);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from memory and initialize `EXRHeader`.
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from memory and initialize `EXRHeader*`
// array
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must set up `EXRHeader` with `ParseEXRHeaderFromFile` before
// calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
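// Illustrative sketch of the full-control loading flow ("input.exr" is a
// hypothetical file; most error handling elided):
//
//   EXRVersion version;
//   EXRHeader header;
//   EXRImage image;
//   const char *err = NULL;
//   InitEXRHeader(&header);
//   InitEXRImage(&image);
//   if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS &&
//       ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) ==
//           TINYEXR_SUCCESS &&
//       LoadEXRImageFromFile(&image, &header, "input.exr", &err) ==
//           TINYEXR_SUCCESS) {
//     // ... use image.images (scanline) or image.tiles (tiled) ...
//     FreeEXRImage(&image);
//   }
//   FreeEXRHeader(&header);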
// Loads single-part OpenEXR image from memory.
// Application must set up `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from memory.
// Application must set up `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to memory.
// Image is compressed using the EXRImage.compression value.
// Returns the number of bytes on success.
// Returns negative value and may set error string in `err` when there's an
// error
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
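// Illustrative sketch ("deep.exr" is a hypothetical file name):
//
//   DeepImage deep;
//   const char *err = NULL;
//   if (LoadDeepEXR(&deep, "deep.exr", &err) == TINYEXR_SUCCESS) {
//     // deep.image[channel][scanline][sample] holds the sample data
//   }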
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <limits>
#include <string>
#include <vector>
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
#ifdef _OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#include "zfp.h"
#endif
#if __cplusplus > 199711L
// C++11
#include <cstdint>
#endif // __cplusplus > 199711L
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repo.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a Core i7 (actual throughput varies
depending on the type of data, and x64 vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
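A minimal creation sketch under the same assumptions (placeholder names,
archive API's enabled):
  mz_zip_archive zip;
  memset(&zip, 0, sizeof(zip));
  if (mz_zip_writer_init_file(&zip, "out.zip", 0)) {
    mz_zip_writer_add_mem(&zip, "hello.txt", "Hello", 5,
                          MZ_DEFAULT_COMPRESSION);
    mz_zip_writer_finalize_archive(&zip);  // must finalize before ending
    mz_zip_writer_end(&zip);
  }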
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
                                                const char *pArchive_name,
                                                const void *pBuf,
                                                size_t buf_size,
                                                const void *pComment,
                                                mz_uint16 comment_size,
                                                mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
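For example (an illustrative call; the names and pData/data_size are
assumptions, not from the original notes):
  mz_zip_add_mem_to_archive_file_in_place("out.zip", "notes/readme.txt",
                                          pData, data_size, NULL, 0,
                                          MZ_BEST_COMPRESSION);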
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which copies the compressed
file data as-is). When you're done, delete the old archive and rename the
newly written archive. This is safe but requires a bunch of temporary disk
space or heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
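For example, in the translation unit that pulls in this file:
  #define _LARGEFILE64_SOURCE 1
  #include "miniz.c"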
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h'" when compiling with
// tcc on Linux.
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've redefined the MZ_FREE macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's tuned
// purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
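// Illustrative only (not part of the original API docs): a minimal streaming
// compression loop. pSrc/src_len are assumed caller-supplied; out_chunk is a
// fixed scratch buffer that would normally be flushed to a file or growable
// buffer after each iteration.
//   mz_stream s;
//   unsigned char out_chunk[16384];
//   memset(&s, 0, sizeof(s));
//   if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) != MZ_OK) return;
//   s.next_in = pSrc; s.avail_in = (unsigned int)src_len;
//   for (;;) {
//     s.next_out = out_chunk; s.avail_out = sizeof(out_chunk);
//     int status = mz_deflate(&s, MZ_FINISH);
//     // ... consume sizeof(out_chunk) - s.avail_out bytes from out_chunk ...
//     if (status == MZ_STREAM_END) break;
//     if (status != MZ_OK) break;  // MZ_STREAM_ERROR, MZ_BUF_ERROR, etc.
//   }
//   mz_deflateEnd(&s);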
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
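// Illustrative only (same assumptions as the mz_deflate() sketch above),
// using the documented single-call mode: MZ_FINISH on the first call with
// buffers sized for the whole stream:
//   mz_stream s;
//   memset(&s, 0, sizeof(s));
//   if (mz_inflateInit(&s) != MZ_OK) return;
//   s.next_in = pCmp; s.avail_in = (unsigned int)cmp_len;
//   s.next_out = pOut; s.avail_out = (unsigned int)out_cap;
//   int status = mz_inflate(&s, MZ_FINISH);
//   // status == MZ_STREAM_END on success; s.total_out = decompressed size
//   mz_inflateEnd(&s);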
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
typedef struct {
mz_uint32 m_file_index;
mz_uint32 m_central_dir_ofs;
mz_uint16 m_version_made_by;
mz_uint16 m_version_needed;
mz_uint16 m_bit_flag;
mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
time_t m_time;
#endif
mz_uint32 m_crc32;
mz_uint64 m_comp_size;
mz_uint64 m_uncomp_size;
mz_uint16 m_internal_attr;
mz_uint32 m_external_attr;
mz_uint64 m_local_header_ofs;
mz_uint32 m_comment_size;
char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
mz_uint64 m_archive_size;
mz_uint64 m_central_directory_file_ofs;
mz_uint m_total_files;
mz_zip_mode m_zip_mode;
mz_uint m_file_offset_alignment;
mz_alloc_func m_pAlloc;
mz_free_func m_pFree;
mz_realloc_func m_pRealloc;
void *m_pAlloc_opaque;
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
void *m_pIO_opaque;
mz_zip_internal_state *m_pState;
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
TINFL_STATUS_BAD_PARAM = -3,
TINFL_STATUS_ADLER32_MISMATCH = -2,
TINFL_STATUS_FAILED = -1,
TINFL_STATUS_DONE = 0,
TINFL_STATUS_NEEDS_MORE_INPUT = 1,
TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
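// Illustrative only (assumptions: pSrc/src_len hold a complete raw deflate
// stream, and pOut/out_cap is large enough for the entire output):
//   tinfl_decompressor inflator;
//   tinfl_init(&inflator);
//   size_t in_len = src_len, out_len = out_cap;
//   tinfl_status st = tinfl_decompress(&inflator, (const mz_uint8 *)pSrc,
//                                      &in_len, (mz_uint8 *)pOut,
//                                      (mz_uint8 *)pOut, &out_len,
//                                      TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
//   // st == TINFL_STATUS_DONE on success; out_len = decompressed size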
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#ifndef MINIZ_HAS_64BIT_REGISTERS
# define MINIZ_HAS_64BIT_REGISTERS 0
#endif
#ifndef TINFL_USE_64BIT_BITBUF
# if MINIZ_HAS_64BIT_REGISTERS
# define TINFL_USE_64BIT_BITBUF 1
# else
# define TINFL_USE_64BIT_BITBUF 0
# endif
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
struct tinfl_decompressor_tag {
mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
m_check_adler32, m_dist, m_counter, m_num_extra,
m_table_sizes[TINFL_MAX_HUFF_TABLES];
tinfl_bit_buf_t m_bit_buf;
size_t m_dist_from_out_buf_start;
tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
mz_uint8 m_raw_header[4],
m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL for a decent default.
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at a
// time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
tdefl_put_buf_func_ptr m_pPut_buf_func;
void *m_pPut_buf_user;
mz_uint m_flags, m_max_probes[2];
int m_greedy_parsing;
mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
m_bit_buffer;
mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
m_wants_to_finish;
tdefl_status m_prev_return_status;
const void *m_pIn_buf;
void *m_pOut_buf;
size_t *m_pIn_buf_size, *m_pOut_buf_size;
tdefl_flush m_flush;
const mz_uint8 *m_pSrc;
size_t m_src_buf_left, m_out_buf_ofs;
mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output data will be supplied to the
// specified callback. In this case, the user should call the
// tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
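// Illustrative only (buffer names/sizes are assumptions; note that
// tdefl_compressor is large, so heap allocation is typical):
//   tdefl_compressor *pComp =
//       (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
//   tdefl_init(pComp, NULL, NULL,
//              TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
//   size_t in_len = src_len, out_len = out_cap;
//   tdefl_status st = tdefl_compress(pComp, pIn, &in_len, pOut, &out_len,
//                                    TDEFL_FINISH);
//   // st == TDEFL_STATUS_DONE when the entire input has been compressed
//   free(pComp);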
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// tdefl_create_comp_flags_from_zip_params() can't be used if MINIZ_NO_ZLIB_APIS
// is defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
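// For example (illustrative): flags roughly equivalent to zlib level 9 with a
// zlib header:
//   mz_uint flags = tdefl_create_comp_flags_from_zip_params(
//       9, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);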
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
size_t block_len = buf_len % 5552;
if (!ptr) return MZ_ADLER32_INIT;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
static const mz_uint32 s_crc32[16] = {
0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
mz_uint32 crcu32 = (mz_uint32)crc;
if (!ptr) return MZ_CRC32_INIT;
crcu32 = ~crcu32;
while (buf_len--) {
mz_uint8 b = *ptr++;
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
}
return ~crcu32;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
(void)opaque, (void)items, (void)size;
return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
(void)opaque, (void)address;
MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
int mz_deflateInit(mz_streamp pStream, int level) {
return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
MZ_DEFAULT_STRATEGY);
}
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy) {
tdefl_compressor *pComp;
mz_uint comp_flags =
TDEFL_COMPUTE_ADLER32 |
tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
if (!pStream) return MZ_STREAM_ERROR;
if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS)))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = MZ_ADLER32_INIT;
pStream->msg = NULL;
pStream->reserved = 0;
pStream->total_in = 0;
pStream->total_out = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
sizeof(tdefl_compressor));
if (!pComp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pComp;
if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
mz_deflateEnd(pStream);
return MZ_PARAM_ERROR;
}
return MZ_OK;
}
int mz_deflateReset(mz_streamp pStream) {
if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
(!pStream->zfree))
return MZ_STREAM_ERROR;
pStream->total_in = pStream->total_out = 0;
tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
((tdefl_compressor *)pStream->state)->m_flags);
return MZ_OK;
}
int mz_deflate(mz_streamp pStream, int flush) {
size_t in_bytes, out_bytes;
mz_ulong orig_total_in, orig_total_out;
int mz_status = MZ_OK;
if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
(!pStream->next_out))
return MZ_STREAM_ERROR;
if (!pStream->avail_out) return MZ_BUF_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
TDEFL_STATUS_DONE)
return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
orig_total_in = pStream->total_in;
orig_total_out = pStream->total_out;
for (;;) {
tdefl_status defl_status;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
pStream->next_in, &in_bytes, pStream->next_out,
&out_bytes, (tdefl_flush)flush);
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (defl_status < 0) {
mz_status = MZ_STREAM_ERROR;
break;
} else if (defl_status == TDEFL_STATUS_DONE) {
mz_status = MZ_STREAM_END;
break;
} else if (!pStream->avail_out)
break;
else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
if ((flush) || (pStream->total_in != orig_total_in) ||
(pStream->total_out != orig_total_out))
break;
return MZ_BUF_ERROR; // Can't make forward progress without some input.
}
}
return mz_status;
}
int mz_deflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
(void)pStream;
// This is really overly conservative. (And lame, but it's actually pretty
// tricky to compute a true upper bound given the way tdefl's blocking works.)
return MZ_MAX(128 + (source_len * 110) / 100,
128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level) {
int status;
mz_stream stream;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_deflateInit(&stream, level);
if (status != MZ_OK) return status;
status = mz_deflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_deflateEnd(&stream);
return (status == MZ_OK) ? MZ_BUF_ERROR : status;
}
*pDest_len = stream.total_out;
return mz_deflateEnd(&stream);
}
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
return mz_compress2(pDest, pDest_len, pSource, source_len,
MZ_DEFAULT_COMPRESSION);
}
mz_ulong mz_compressBound(mz_ulong source_len) {
return mz_deflateBound(NULL, source_len);
}
typedef struct {
tinfl_decompressor m_decomp;
mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
int m_window_bits;
mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
tinfl_status m_last_status;
} inflate_state;
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
inflate_state *pDecomp;
if (!pStream) return MZ_STREAM_ERROR;
if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
(-window_bits != MZ_DEFAULT_WINDOW_BITS))
return MZ_PARAM_ERROR;
pStream->data_type = 0;
pStream->adler = 0;
pStream->msg = NULL;
pStream->total_in = 0;
pStream->total_out = 0;
pStream->reserved = 0;
if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
if (!pStream->zfree) pStream->zfree = def_free_func;
pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
sizeof(inflate_state));
if (!pDecomp) return MZ_MEM_ERROR;
pStream->state = (struct mz_internal_state *)pDecomp;
tinfl_init(&pDecomp->m_decomp);
pDecomp->m_dict_ofs = 0;
pDecomp->m_dict_avail = 0;
pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
pDecomp->m_first_call = 1;
pDecomp->m_has_flushed = 0;
pDecomp->m_window_bits = window_bits;
return MZ_OK;
}
int mz_inflateInit(mz_streamp pStream) {
return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
int mz_inflate(mz_streamp pStream, int flush) {
inflate_state *pState;
mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
size_t in_bytes, out_bytes, orig_avail_in;
tinfl_status status;
if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
return MZ_STREAM_ERROR;
pState = (inflate_state *)pStream->state;
if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
orig_avail_in = pStream->avail_in;
first_call = pState->m_first_call;
pState->m_first_call = 0;
if (pState->m_last_status < 0) return MZ_DATA_ERROR;
if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
pState->m_has_flushed |= (flush == MZ_FINISH);
if ((flush == MZ_FINISH) && (first_call)) {
// MZ_FINISH on the first call implies that the input and output buffers are
// large enough to hold the entire compressed/decompressed file.
decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
in_bytes = pStream->avail_in;
out_bytes = pStream->avail_out;
status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
pStream->next_out, pStream->next_out, &out_bytes,
decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pStream->next_out += (mz_uint)out_bytes;
pStream->avail_out -= (mz_uint)out_bytes;
pStream->total_out += (mz_uint)out_bytes;
if (status < 0)
return MZ_DATA_ERROR;
else if (status != TINFL_STATUS_DONE) {
pState->m_last_status = TINFL_STATUS_FAILED;
return MZ_BUF_ERROR;
}
return MZ_STREAM_END;
}
// If flush != MZ_FINISH then we must assume there's more input.
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
if (pState->m_dict_avail) {
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
return ((pState->m_last_status == TINFL_STATUS_DONE) &&
(!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
for (;;) {
in_bytes = pStream->avail_in;
out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
status = tinfl_decompress(
&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
pState->m_last_status = status;
pStream->next_in += (mz_uint)in_bytes;
pStream->avail_in -= (mz_uint)in_bytes;
pStream->total_in += (mz_uint)in_bytes;
pStream->adler = tinfl_get_adler32(&pState->m_decomp);
pState->m_dict_avail = (mz_uint)out_bytes;
n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
pStream->next_out += n;
pStream->avail_out -= n;
pStream->total_out += n;
pState->m_dict_avail -= n;
pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
if (status < 0)
return MZ_DATA_ERROR; // Stream is corrupted (there could be some
// uncompressed data left in the output dictionary -
// oh well).
else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
// without supplying more input or by setting flush
// to MZ_FINISH.
else if (flush == MZ_FINISH) {
// The output buffer MUST be large enough to hold the remaining uncompressed
// data when flush == MZ_FINISH.
if (status == TINFL_STATUS_DONE)
return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
// status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
// at least 1 more byte on the way. If there's no more room left in the
// output buffer then something is wrong.
else if (!pStream->avail_out)
return MZ_BUF_ERROR;
} else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
(!pStream->avail_out) || (pState->m_dict_avail))
break;
}
return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
? MZ_STREAM_END
: MZ_OK;
}
int mz_inflateEnd(mz_streamp pStream) {
if (!pStream) return MZ_STREAM_ERROR;
if (pStream->state) {
pStream->zfree(pStream->opaque, pStream->state);
pStream->state = NULL;
}
return MZ_OK;
}
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len) {
mz_stream stream;
int status;
memset(&stream, 0, sizeof(stream));
// In case mz_ulong is 64-bits (argh I hate longs).
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
stream.next_in = pSource;
stream.avail_in = (mz_uint32)source_len;
stream.next_out = pDest;
stream.avail_out = (mz_uint32)*pDest_len;
status = mz_inflateInit(&stream);
if (status != MZ_OK) return status;
status = mz_inflate(&stream, MZ_FINISH);
if (status != MZ_STREAM_END) {
mz_inflateEnd(&stream);
return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
: status;
}
*pDest_len = stream.total_out;
return mz_inflateEnd(&stream);
}
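// Illustrative sketch (not part of the original source): typical one-shot
// use of mz_uncompress(). The input buffer is hypothetical; compiled out
// with #if 0. Note that *pDest_len is in/out: capacity on entry, bytes
// actually written on success.
#if 0
static int example_uncompress(const unsigned char *pComp, mz_ulong comp_len,
                              unsigned char *pOut, mz_ulong out_capacity) {
  mz_ulong out_len = out_capacity;
  int status = mz_uncompress(pOut, &out_len, pComp, comp_len);
  if (status != MZ_OK) return -1; // see mz_error(status) for a description
  return (int)out_len;            // number of decompressed bytes
}
#endif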
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from
// all compression APIs)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
#define TINFL_CR_BEGIN \
switch (r->m_state) { \
case 0:
#define TINFL_CR_RETURN(state_index, result) \
do { \
status = result; \
r->m_state = state_index; \
goto common_exit; \
case state_index:; \
} \
MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
do { \
for (;;) { \
TINFL_CR_RETURN(state_index, result); \
} \
} \
MZ_MACRO_END
#define TINFL_CR_FINISH }
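// The TINFL_CR_* macros above implement a switch-based coroutine (the same
// trick as Duff's device / protothreads): TINFL_CR_RETURN stores a state
// index and jumps to common_exit, and on the next call the switch opened by
// TINFL_CR_BEGIN lands on the matching "case" label, resuming execution
// mid-function with all persistent state kept in the tinfl_decompressor
// struct. A minimal standalone sketch of the pattern (hypothetical,
// compiled out with #if 0):
#if 0
static int example_coroutine(int *pState) {
  switch (*pState) {
    case 0:
      *pState = 1;
      return 100; // first call yields here...
    case 1:
      *pState = 2;
      return 200; // ...second call resumes here and yields again
    default:
      return -1; // exhausted
  }
}
#endif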
// TODO: If the caller has indicated that there's no more input, and we
// attempt to read beyond the input buf, then something is wrong with the
// input because the inflator never reads ahead more than it needs to.
// Currently TINFL_GET_BYTE() pads the end of the stream with 0's in this
// scenario.
#define TINFL_GET_BYTE(state_index, c) \
do { \
if (pIn_buf_cur >= pIn_buf_end) { \
for (;;) { \
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
if (pIn_buf_cur < pIn_buf_end) { \
c = *pIn_buf_cur++; \
break; \
} \
} else { \
c = 0; \
break; \
} \
} \
} else \
c = *pIn_buf_cur++; \
} \
MZ_MACRO_END
#define TINFL_NEED_BITS(state_index, n) \
do { \
mz_uint c; \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n) \
do { \
if (num_bits < (mz_uint)(n)) { \
TINFL_NEED_BITS(state_index, n); \
} \
b = bit_buf & ((1 << (n)) - 1); \
bit_buf >>= (n); \
num_bits -= (n); \
} \
MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2. It reads just enough bytes
// from the input stream that are needed to decode the next Huffman code
// (and absolutely no more). It works by trying to fully decode a Huffman
// code by using whatever bits are currently present in the bit buffer. If
// this fails, it reads another byte, and tries again until it succeeds or
// until the bit buffer contains >=15 bits (deflate's max. Huffman code
// size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
do { \
temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
if (temp >= 0) { \
code_len = temp >> 9; \
if ((code_len) && (num_bits >= code_len)) break; \
} else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while ((temp < 0) && (num_bits >= (code_len + 1))); \
if (temp >= 0) break; \
} \
TINFL_GET_BYTE(state_index, c); \
bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
num_bits += 8; \
} while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more
// complex than you would initially expect because the zlib API expects the
// decompressor to never read beyond the final byte of the deflate stream.
// (In other words, when this macro wants to read another byte from the
// input, it REALLY needs another byte in order to fully decode the next
// Huffman code.) Handling this properly is particularly important on raw
// deflate (non-zlib) streams, which aren't followed by a byte aligned
// adler-32. The slow path is only executed at the very end of the input
// buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
do { \
int temp; \
mz_uint code_len, c; \
if (num_bits < 15) { \
if ((pIn_buf_end - pIn_buf_cur) < 2) { \
TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
} else { \
bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
(((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
pIn_buf_cur += 2; \
num_bits += 16; \
} \
} \
if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
0) \
code_len = temp >> 9, temp &= 511; \
else { \
code_len = TINFL_FAST_LOOKUP_BITS; \
do { \
temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
} while (temp < 0); \
} \
sym = temp; \
bit_buf >>= code_len; \
num_bits -= code_len; \
} \
MZ_MACRO_END
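// Note on the table layout used by the decode macros: m_look_up[] is indexed
// by the low TINFL_FAST_LOOKUP_BITS of the bit buffer. A non-negative entry
// packs (code_len << 9) | symbol, so "temp >> 9" yields the code length and
// "temp & 511" the symbol; a negative entry is a bitwise-NOT index into
// m_tree[], which is then walked one input bit at a time for codes longer
// than the fast lookup covers.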
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags) {
static const int s_length_base[31] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
4, 4, 5, 5, 5, 5, 0, 0, 0};
static const int s_dist_base[32] = {
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0};
static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
static const mz_uint8 s_length_dezigzag[19] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const int s_min_table_sizes[3] = {257, 1, 4};
tinfl_status status = TINFL_STATUS_FAILED;
mz_uint32 num_bits, dist, counter, num_extra;
tinfl_bit_buf_t bit_buf;
const mz_uint8 *pIn_buf_cur = pIn_buf_next,
*const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
mz_uint8 *pOut_buf_cur = pOut_buf_next,
*const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
size_t out_buf_size_mask =
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
? (size_t)-1
: ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
dist_from_out_buf_start;
// Ensure the output buffer's size is a power of 2, unless the output buffer
// is large enough to hold the entire output file (in which case it doesn't
// matter).
if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
(pOut_buf_next < pOut_buf_start)) {
*pIn_buf_size = *pOut_buf_size = 0;
return TINFL_STATUS_BAD_PARAM;
}
num_bits = r->m_num_bits;
bit_buf = r->m_bit_buf;
dist = r->m_dist;
counter = r->m_counter;
num_extra = r->m_num_extra;
dist_from_out_buf_start = r->m_dist_from_out_buf_start;
TINFL_CR_BEGIN
bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
r->m_z_adler32 = r->m_check_adler32 = 1;
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_GET_BYTE(1, r->m_zhdr0);
TINFL_GET_BYTE(2, r->m_zhdr1);
counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
(r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
((out_buf_size_mask + 1) <
(size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
if (counter) {
TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
}
}
do {
TINFL_GET_BITS(3, r->m_final, 3);
r->m_type = r->m_final >> 1;
if (r->m_type == 0) {
TINFL_SKIP_BITS(5, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
if (num_bits)
TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
else
TINFL_GET_BYTE(7, r->m_raw_header[counter]);
}
if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
(mz_uint)(0xFFFF ^
(r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
}
while ((counter) && (num_bits)) {
TINFL_GET_BITS(51, dist, 8);
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)dist;
counter--;
}
while (counter) {
size_t n;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
}
while (pIn_buf_cur >= pIn_buf_end) {
if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
} else {
TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
}
}
n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
(size_t)(pIn_buf_end - pIn_buf_cur)),
counter);
TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
pIn_buf_cur += n;
pOut_buf_cur += n;
counter -= (mz_uint)n;
}
} else if (r->m_type == 3) {
TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
} else {
if (r->m_type == 1) {
mz_uint8 *p = r->m_tables[0].m_code_size;
mz_uint i;
r->m_table_sizes[0] = 288;
r->m_table_sizes[1] = 32;
TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
} else {
for (counter = 0; counter < 3; counter++) {
TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
r->m_table_sizes[counter] += s_min_table_sizes[counter];
}
MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
mz_uint s;
TINFL_GET_BITS(14, s, 3);
r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
}
r->m_table_sizes[2] = 19;
}
for (; (int)r->m_type >= 0; r->m_type--) {
int tree_next, tree_cur;
tinfl_huff_table *pTable;
mz_uint i, j, used_syms, total, sym_index, next_code[17],
total_syms[16];
pTable = &r->m_tables[r->m_type];
MZ_CLEAR_OBJ(total_syms);
MZ_CLEAR_OBJ(pTable->m_look_up);
MZ_CLEAR_OBJ(pTable->m_tree);
for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
total_syms[pTable->m_code_size[i]]++;
used_syms = 0, total = 0;
next_code[0] = next_code[1] = 0;
for (i = 1; i <= 15; ++i) {
used_syms += total_syms[i];
next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
}
if ((65536 != total) && (used_syms > 1)) {
TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
}
for (tree_next = -1, sym_index = 0;
sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
mz_uint rev_code = 0, l, cur_code,
code_size = pTable->m_code_size[sym_index];
if (!code_size) continue;
cur_code = next_code[code_size]++;
for (l = code_size; l > 0; l--, cur_code >>= 1)
rev_code = (rev_code << 1) | (cur_code & 1);
if (code_size <= TINFL_FAST_LOOKUP_BITS) {
mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
pTable->m_look_up[rev_code] = k;
rev_code += (1 << code_size);
}
continue;
}
if (0 ==
(tree_cur = pTable->m_look_up[rev_code &
(TINFL_FAST_LOOKUP_SIZE - 1)])) {
pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
(mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
}
rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
tree_cur -= ((rev_code >>= 1) & 1);
if (!pTable->m_tree[-tree_cur - 1]) {
pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
tree_cur = tree_next;
tree_next -= 2;
} else
tree_cur = pTable->m_tree[-tree_cur - 1];
}
tree_cur -= ((rev_code >>= 1) & 1);
pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
}
if (r->m_type == 2) {
for (counter = 0;
counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
mz_uint s;
TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
if (dist < 16) {
r->m_len_codes[counter++] = (mz_uint8)dist;
continue;
}
if ((dist == 16) && (!counter)) {
TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
}
num_extra = "\02\03\07"[dist - 16];
TINFL_GET_BITS(18, s, num_extra);
s += "\03\03\013"[dist - 16];
TINFL_MEMSET(r->m_len_codes + counter,
(dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
counter += s;
}
if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
}
TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
r->m_table_sizes[0]);
TINFL_MEMCPY(r->m_tables[1].m_code_size,
r->m_len_codes + r->m_table_sizes[0],
r->m_table_sizes[1]);
}
}
for (;;) {
mz_uint8 *pSrc;
for (;;) {
if (((pIn_buf_end - pIn_buf_cur) < 4) ||
((pOut_buf_end - pOut_buf_cur) < 2)) {
TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
if (counter >= 256) break;
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ = (mz_uint8)counter;
} else {
int sym2;
mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
if (num_bits < 30) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 4;
num_bits += 32;
}
#else
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
counter = sym2;
bit_buf >>= code_len;
num_bits -= code_len;
if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
if (num_bits < 15) {
bit_buf |=
(((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
pIn_buf_cur += 2;
num_bits += 16;
}
#endif
if ((sym2 =
r->m_tables[0]
.m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
0)
code_len = sym2 >> 9;
else {
code_len = TINFL_FAST_LOOKUP_BITS;
do {
sym2 = r->m_tables[0]
.m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
} while (sym2 < 0);
}
bit_buf >>= code_len;
num_bits -= code_len;
pOut_buf_cur[0] = (mz_uint8)counter;
if (sym2 & 256) {
pOut_buf_cur++;
counter = sym2;
break;
}
pOut_buf_cur[1] = (mz_uint8)sym2;
pOut_buf_cur += 2;
}
}
if ((counter &= 511) == 256) break;
num_extra = s_length_extra[counter - 257];
counter = s_length_base[counter - 257];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(25, extra_bits, num_extra);
counter += extra_bits;
}
TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
num_extra = s_dist_extra[dist];
dist = s_dist_base[dist];
if (num_extra) {
mz_uint extra_bits;
TINFL_GET_BITS(27, extra_bits, num_extra);
dist += extra_bits;
}
dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
if ((dist > dist_from_out_buf_start) &&
(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
}
pSrc = pOut_buf_start +
((dist_from_out_buf_start - dist) & out_buf_size_mask);
if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
while (counter--) {
while (pOut_buf_cur >= pOut_buf_end) {
TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
}
*pOut_buf_cur++ =
pOut_buf_start[(dist_from_out_buf_start++ - dist) &
out_buf_size_mask];
}
continue;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
else if ((counter >= 9) && (counter <= dist)) {
const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
do {
((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
pOut_buf_cur += 8;
} while ((pSrc += 8) < pSrc_end);
if ((counter &= 7) < 3) {
if (counter) {
pOut_buf_cur[0] = pSrc[0];
if (counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
continue;
}
}
#endif
do {
pOut_buf_cur[0] = pSrc[0];
pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur[2] = pSrc[2];
pOut_buf_cur += 3;
pSrc += 3;
} while ((int)(counter -= 3) > 2);
if ((int)counter > 0) {
pOut_buf_cur[0] = pSrc[0];
if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
pOut_buf_cur += counter;
}
}
}
} while (!(r->m_final & 1));
if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
TINFL_SKIP_BITS(32, num_bits & 7);
for (counter = 0; counter < 4; ++counter) {
mz_uint s;
if (num_bits)
TINFL_GET_BITS(41, s, 8);
else
TINFL_GET_BYTE(42, s);
r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
}
}
TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
TINFL_CR_FINISH
common_exit:
r->m_num_bits = num_bits;
r->m_bit_buf = bit_buf;
r->m_dist = dist;
r->m_counter = counter;
r->m_num_extra = num_extra;
r->m_dist_from_out_buf_start = dist_from_out_buf_start;
*pIn_buf_size = pIn_buf_cur - pIn_buf_next;
*pOut_buf_size = pOut_buf_cur - pOut_buf_next;
if ((decomp_flags &
(TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
(status >= 0)) {
const mz_uint8 *ptr = pOut_buf_next;
size_t buf_len = *pOut_buf_size;
mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
s2 = r->m_check_adler32 >> 16;
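    // 5552 is the largest block size for which the unreduced adler sums
    // still fit in 32 bits (the same NMAX bound zlib's adler32 uses), so
    // s1/s2 only pay the expensive % 65521 once per block.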
size_t block_len = buf_len % 5552;
while (buf_len) {
for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
s1 += ptr[0], s2 += s1;
s1 += ptr[1], s2 += s1;
s1 += ptr[2], s2 += s1;
s1 += ptr[3], s2 += s1;
s1 += ptr[4], s2 += s1;
s1 += ptr[5], s2 += s1;
s1 += ptr[6], s2 += s1;
s1 += ptr[7], s2 += s1;
}
for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
s1 %= 65521U, s2 %= 65521U;
buf_len -= block_len;
block_len = 5552;
}
r->m_check_adler32 = (s2 << 16) + s1;
if ((status == TINFL_STATUS_DONE) &&
(decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
(r->m_check_adler32 != r->m_z_adler32))
status = TINFL_STATUS_ADLER32_MISMATCH;
}
return status;
}
// Higher level helper functions.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tinfl_decompressor decomp;
void *pBuf = NULL, *pNew_buf;
size_t src_buf_ofs = 0, out_buf_capacity = 0;
*pOut_len = 0;
tinfl_init(&decomp);
for (;;) {
size_t src_buf_size = src_buf_len - src_buf_ofs,
dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
tinfl_status status = tinfl_decompress(
&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
(mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
&dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
src_buf_ofs += src_buf_size;
*pOut_len += dst_buf_size;
if (status == TINFL_STATUS_DONE) break;
new_out_buf_capacity = out_buf_capacity * 2;
if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
if (!pNew_buf) {
MZ_FREE(pBuf);
*pOut_len = 0;
return NULL;
}
pBuf = pNew_buf;
out_buf_capacity = new_out_buf_capacity;
}
return pBuf;
}
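// Illustrative sketch (not part of the original source): growing-heap
// decompression of an in-memory stream. The input data is hypothetical;
// compiled out with #if 0.
#if 0
static void example_mem_to_heap(const void *pComp, size_t comp_len) {
  size_t out_len = 0;
  // Pass TINFL_FLAG_PARSE_ZLIB_HEADER instead of 0 for zlib-wrapped data.
  void *pOut = tinfl_decompress_mem_to_heap(pComp, comp_len, &out_len, 0);
  if (pOut) {
    // ... consume out_len bytes at pOut ...
    MZ_FREE(pOut);
  }
}
#endif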
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
int result = 0;
tinfl_decompressor decomp;
mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
size_t in_buf_ofs = 0, dict_ofs = 0;
if (!pDict) return TINFL_STATUS_FAILED;
tinfl_init(&decomp);
for (;;) {
size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
tinfl_status status =
tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
&in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
(flags &
~(TINFL_FLAG_HAS_MORE_INPUT |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
in_buf_ofs += in_buf_size;
if ((dst_buf_size) &&
(!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
break;
if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
result = (status == TINFL_STATUS_DONE);
break;
}
dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
}
MZ_FREE(pDict);
*pIn_buf_size = in_buf_ofs;
return result;
}
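// Illustrative sketch (not part of the original source): streaming
// decompressed output straight to a FILE* through the callback variant
// above, using only a TINFL_LZ_DICT_SIZE scratch dictionary. The helper
// names are hypothetical; compiled out with #if 0.
#if 0
#include <stdio.h>
static int example_put_buf(const void *pBuf, int len, void *pUser) {
  // Returning nonzero keeps decompression going.
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static int example_mem_to_file(const void *pComp, size_t comp_len, FILE *pF) {
  size_t in_len = comp_len;
  return tinfl_decompress_mem_to_callback(pComp, &in_len, example_put_buf, pF,
                                          0);
}
#endif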
// ------------------- Low-level Compression (independent from all
// decompression APIs)
// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = {
257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
285};
static const mz_uint8 s_tdefl_len_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
tdefl_sym_freq *pSyms0,
tdefl_sym_freq *pSyms1) {
mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
MZ_CLEAR_OBJ(hist);
for (i = 0; i < num_syms; i++) {
mz_uint freq = pSyms0[i].m_key;
hist[freq & 0xFF]++;
hist[256 + ((freq >> 8) & 0xFF)]++;
}
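  // If every 16-bit key fits in its low byte, all counts land in
  // hist[256 + 0] and the second (high-byte) pass can be skipped.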
while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
total_passes--;
for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
const mz_uint32 *pHist = &hist[pass << 8];
mz_uint offsets[256], cur_ofs = 0;
for (i = 0; i < 256; i++) {
offsets[i] = cur_ofs;
cur_ofs += pHist[i];
}
for (i = 0; i < num_syms; i++)
pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
pCur_syms[i];
{
tdefl_sym_freq *t = pCur_syms;
pCur_syms = pNew_syms;
pNew_syms = t;
}
}
return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
int root, leaf, next, avbl, used, dpth;
if (n == 0)
return;
else if (n == 1) {
A[0].m_key = 1;
return;
}
A[0].m_key += A[1].m_key;
root = 0;
leaf = 2;
for (next = 1; next < n - 1; next++) {
if (leaf >= n || A[root].m_key < A[leaf].m_key) {
A[next].m_key = A[root].m_key;
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = A[leaf++].m_key;
if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
A[root++].m_key = (mz_uint16)next;
} else
A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
}
A[n - 2].m_key = 0;
for (next = n - 3; next >= 0; next--)
A[next].m_key = A[A[next].m_key].m_key + 1;
avbl = 1;
used = dpth = 0;
root = n - 2;
next = n - 1;
while (avbl > 0) {
while (root >= 0 && (int)A[root].m_key == dpth) {
used++;
root--;
}
while (avbl > used) {
A[next--].m_key = (mz_uint16)(dpth);
avbl--;
}
avbl = 2 * used;
dpth++;
used = 0;
}
}
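// Worked example (hypothetical input): with A[] sorted by increasing
// frequency {1, 1, 2, 4}, the passes above rewrite m_key in place to the
// Huffman code lengths {3, 3, 2, 1}; no explicit tree is ever built.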
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
int code_list_len,
int max_code_size) {
int i;
mz_uint32 total = 0;
if (code_list_len <= 1) return;
for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
pNum_codes[max_code_size] += pNum_codes[i];
for (i = max_code_size; i > 0; i--)
total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
while (total != (1UL << max_code_size)) {
pNum_codes[max_code_size]--;
for (i = max_code_size - 1; i > 0; i--)
if (pNum_codes[i]) {
pNum_codes[i]--;
pNum_codes[i + 1] += 2;
break;
}
total--;
}
}
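// Each trip through the loop above moves one code from some depth i to two
// codes at depth i+1 while dropping one max-depth code, which shrinks the
// Kraft sum "total" by exactly 1 per iteration until it equals
// 1 << max_code_size, the value a complete prefix code must have.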
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
int table_len, int code_size_limit,
int static_table) {
int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
MZ_CLEAR_OBJ(num_codes);
if (static_table) {
for (i = 0; i < table_len; i++)
num_codes[d->m_huff_code_sizes[table_num][i]]++;
} else {
tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
*pSyms;
int num_used_syms = 0;
const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
for (i = 0; i < table_len; i++)
if (pSym_count[i]) {
syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
}
pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
code_size_limit);
MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
for (l = num_codes[i]; l > 0; l--)
d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
}
next_code[1] = 0;
for (j = 0, i = 2; i <= code_size_limit; i++)
next_code[i] = j = ((j + num_codes[i - 1]) << 1);
for (i = 0; i < table_len; i++) {
mz_uint rev_code = 0, code, code_size;
if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
code = next_code[code_size]++;
for (l = code_size; l > 0; l--, code >>= 1)
rev_code = (rev_code << 1) | (code & 1);
d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
}
}
#define TDEFL_PUT_BITS(b, l) \
do { \
mz_uint bits = b; \
mz_uint len = l; \
MZ_ASSERT(bits <= ((1U << len) - 1U)); \
d->m_bit_buffer |= (bits << d->m_bits_in); \
d->m_bits_in += len; \
while (d->m_bits_in >= 8) { \
if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
*d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
d->m_bit_buffer >>= 8; \
d->m_bits_in -= 8; \
} \
} \
MZ_MACRO_END
#define TDEFL_RLE_PREV_CODE_SIZE() \
{ \
if (rle_repeat_count) { \
if (rle_repeat_count < 3) { \
d->m_huff_count[2][prev_code_size] = (mz_uint16)( \
d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
while (rle_repeat_count--) \
packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
} else { \
d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 16; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_repeat_count - 3); \
} \
rle_repeat_count = 0; \
} \
}
#define TDEFL_RLE_ZERO_CODE_SIZE() \
{ \
if (rle_z_count) { \
if (rle_z_count < 3) { \
d->m_huff_count[2][0] = \
(mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
} else if (rle_z_count <= 10) { \
d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 17; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 3); \
} else { \
d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
packed_code_sizes[num_packed_code_sizes++] = 18; \
packed_code_sizes[num_packed_code_sizes++] = \
(mz_uint8)(rle_z_count - 11); \
} \
rle_z_count = 0; \
} \
}
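// The two RLE macros above emit DEFLATE's code-length alphabet (RFC 1951):
// symbol 16 repeats the previous length 3 to 6 times (2 extra bits), 17
// emits a run of 3 to 10 zeros (3 extra bits), and 18 a run of 11 to 138
// zeros (7 extra bits). These are the same symbols the inflater's
// "\02\03\07" / "\03\03\013" extra-bit and base-value strings decode on the
// other side.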
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
int num_lit_codes, num_dist_codes, num_bit_lengths;
mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
rle_repeat_count, packed_code_sizes_index;
mz_uint8
code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
prev_code_size = 0xFF;
d->m_huff_count[0][256] = 1;
tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
num_dist_codes);
total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
num_packed_code_sizes = 0;
rle_z_count = 0;
rle_repeat_count = 0;
memset(&d->m_huff_count[2][0], 0,
sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
for (i = 0; i < total_code_sizes_to_pack; i++) {
mz_uint8 code_size = code_sizes_to_pack[i];
if (!code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
if (++rle_z_count == 138) {
TDEFL_RLE_ZERO_CODE_SIZE();
}
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
if (code_size != prev_code_size) {
TDEFL_RLE_PREV_CODE_SIZE();
d->m_huff_count[2][code_size] =
(mz_uint16)(d->m_huff_count[2][code_size] + 1);
packed_code_sizes[num_packed_code_sizes++] = code_size;
} else if (++rle_repeat_count == 6) {
TDEFL_RLE_PREV_CODE_SIZE();
}
}
prev_code_size = code_size;
}
if (rle_repeat_count) {
TDEFL_RLE_PREV_CODE_SIZE();
} else {
TDEFL_RLE_ZERO_CODE_SIZE();
}
tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
TDEFL_PUT_BITS(2, 2);
TDEFL_PUT_BITS(num_lit_codes - 257, 5);
TDEFL_PUT_BITS(num_dist_codes - 1, 5);
for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
if (d->m_huff_code_sizes
[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
break;
num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
for (i = 0; (int)i < num_bit_lengths; i++)
TDEFL_PUT_BITS(
d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
for (packed_code_sizes_index = 0;
packed_code_sizes_index < num_packed_code_sizes;) {
mz_uint code = packed_code_sizes[packed_code_sizes_index++];
MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
if (code >= 16)
TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
"\02\03\07"[code - 16]);
}
}
static void tdefl_start_static_block(tdefl_compressor *d) {
mz_uint i;
mz_uint8 *p = &d->m_huff_code_sizes[0][0];
for (i = 0; i <= 143; ++i) *p++ = 8;
for (; i <= 255; ++i) *p++ = 9;
for (; i <= 279; ++i) *p++ = 7;
for (; i <= 287; ++i) *p++ = 8;
memset(d->m_huff_code_sizes[1], 5, 32);
tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
TDEFL_PUT_BITS(1, 2);
}
static const mz_uint mz_bitmasks[17] = {
0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
mz_uint8 *pOutput_buf = d->m_pOutput_buf;
mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
mz_uint64 bit_buffer = d->m_bit_buffer;
mz_uint bits_in = d->m_bits_in;
#define TDEFL_PUT_BITS_FAST(b, l) \
{ \
bit_buffer |= (((mz_uint64)(b)) << bits_in); \
bits_in += (l); \
}
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint s0, s1, n0, n1, sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
// This sequence coaxes MSVC into using cmov's vs. jmp's.
s0 = s_tdefl_small_dist_sym[match_dist & 511];
n0 = s_tdefl_small_dist_extra[match_dist & 511];
s1 = s_tdefl_large_dist_sym[match_dist >> 8];
n1 = s_tdefl_large_dist_extra[match_dist >> 8];
sym = (match_dist < 512) ? s0 : s1;
num_extra_bits = (match_dist < 512) ? n0 : n1;
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
flags >>= 1;
lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
d->m_huff_code_sizes[0][lit]);
}
}
}
if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
*(mz_uint64 *)pOutput_buf = bit_buffer;
pOutput_buf += (bits_in >> 3);
bit_buffer >>= (bits_in & ~7);
bits_in &= 7;
}
#undef TDEFL_PUT_BITS_FAST
d->m_pOutput_buf = pOutput_buf;
d->m_bits_in = 0;
d->m_bit_buffer = 0;
while (bits_in) {
mz_uint32 n = MZ_MIN(bits_in, 16);
TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
bit_buffer >>= n;
bits_in -= n;
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
mz_uint flags;
mz_uint8 *pLZ_codes;
flags = 1;
for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
flags >>= 1) {
if (flags == 1) flags = *pLZ_codes++ | 0x100;
if (flags & 1) {
mz_uint sym, num_extra_bits;
mz_uint match_len = pLZ_codes[0],
match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
pLZ_codes += 3;
MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
s_tdefl_len_extra[match_len]);
if (match_dist < 512) {
sym = s_tdefl_small_dist_sym[match_dist];
num_extra_bits = s_tdefl_small_dist_extra[match_dist];
} else {
sym = s_tdefl_large_dist_sym[match_dist >> 8];
num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
}
MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
} else {
mz_uint lit = *pLZ_codes++;
MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
}
}
TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
if (static_block)
tdefl_start_static_block(d);
else
tdefl_start_dynamic_block(d);
return tdefl_compress_lz_codes(d);
}
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
mz_uint saved_bit_buf, saved_bits_in;
mz_uint8 *pSaved_output_buf;
mz_bool comp_block_succeeded = MZ_FALSE;
int n, use_raw_block =
((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
(d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
mz_uint8 *pOutput_buf_start =
((d->m_pPut_buf_func == NULL) &&
((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
: d->m_output_buf;
d->m_pOutput_buf = pOutput_buf_start;
d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
MZ_ASSERT(!d->m_output_flush_remaining);
d->m_output_flush_ofs = 0;
d->m_output_flush_remaining = 0;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
TDEFL_PUT_BITS(0x78, 8);
TDEFL_PUT_BITS(0x01, 8);
}
TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
pSaved_output_buf = d->m_pOutput_buf;
saved_bit_buf = d->m_bit_buffer;
saved_bits_in = d->m_bits_in;
if (!use_raw_block)
comp_block_succeeded =
tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
(d->m_total_lz_bytes < 48));
// If the block gets expanded, forget the current contents of the output
// buffer and send a raw block instead.
if (((use_raw_block) ||
((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
d->m_total_lz_bytes))) &&
((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
mz_uint i;
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
TDEFL_PUT_BITS(0, 2);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
}
for (i = 0; i < d->m_total_lz_bytes; ++i) {
TDEFL_PUT_BITS(
d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
8);
}
}
// Check for the extremely unlikely (if not impossible) case of the compressed
// block not fitting into the output buffer when using dynamic codes.
else if (!comp_block_succeeded) {
d->m_pOutput_buf = pSaved_output_buf;
d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
tdefl_compress_block(d, MZ_TRUE);
}
if (flush) {
if (flush == TDEFL_FINISH) {
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
mz_uint i, a = d->m_adler32;
for (i = 0; i < 4; i++) {
TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
a <<= 8;
}
}
} else {
mz_uint i, z = 0;
TDEFL_PUT_BITS(0, 3);
if (d->m_bits_in) {
TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
}
for (i = 2; i; --i, z ^= 0xFFFF) {
TDEFL_PUT_BITS(z & 0xFFFF, 16);
}
}
}
MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
d->m_total_lz_bytes = 0;
d->m_block_index++;
if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
if (d->m_pPut_buf_func) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
} else if (pOutput_buf_start == d->m_output_buf) {
int bytes_to_copy = (int)MZ_MIN(
(size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
bytes_to_copy);
d->m_out_buf_ofs += bytes_to_copy;
if ((n -= bytes_to_copy) != 0) {
d->m_output_flush_ofs = bytes_to_copy;
d->m_output_flush_remaining = n;
}
} else {
d->m_out_buf_ofs += n;
}
}
return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
s01 = TDEFL_READ_UNALIGNED_WORD(s);
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
q = (const mz_uint16 *)(d->m_dict + probe_pos);
if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
p = s;
probe_len = 32;
do {
} while (
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
if (!probe_len) {
*pMatch_dist = dist;
*pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
break;
} else if ((probe_len = ((mz_uint)(p - s) * 2) +
(mz_uint)(*(const mz_uint8 *)p ==
*(const mz_uint8 *)q)) > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
max_match_len)
break;
c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
}
}
}
#else
static MZ_FORCEINLINE void tdefl_find_match(
tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
probe_len;
mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
const mz_uint8 *s = d->m_dict + pos, *p, *q;
mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
if (max_match_len <= match_len) return;
for (;;) {
for (;;) {
if (--num_probes_left == 0) return;
#define TDEFL_PROBE \
next_probe_pos = d->m_next[probe_pos]; \
if ((!next_probe_pos) || \
((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
return; \
probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
if ((d->m_dict[probe_pos + match_len] == c0) && \
(d->m_dict[probe_pos + match_len - 1] == c1)) \
break;
TDEFL_PROBE;
TDEFL_PROBE;
TDEFL_PROBE;
}
if (!dist) break;
p = s;
q = d->m_dict + probe_pos;
for (probe_len = 0; probe_len < max_match_len; probe_len++)
if (*p++ != *q++) break;
if (probe_len > match_len) {
*pMatch_dist = dist;
if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
c0 = d->m_dict[pos + match_len];
c1 = d->m_dict[pos + match_len - 1];
}
}
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
// Faster, minimally featured LZRW1-style match+parse loop with better
// register utilization. Intended for applications where raw throughput is
// valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos,
lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
total_lz_bytes = d->m_total_lz_bytes,
num_flags_left = d->m_num_flags_left;
mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
mz_uint dst_pos =
(lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
d->m_src_buf_left -= num_bytes_to_process;
lookahead_size += num_bytes_to_process;
while (num_bytes_to_process) {
mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
d->m_pSrc += n;
dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
num_bytes_to_process -= n;
}
dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
break;
while (lookahead_size >= 4) {
mz_uint cur_match_dist, cur_match_len = 1;
mz_uint8 *pCur_dict = d->m_dict + cur_pos;
mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
mz_uint hash =
(first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
TDEFL_LEVEL1_HASH_SIZE_MASK;
mz_uint probe_pos = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)lookahead_pos;
if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
dict_size) &&
((*(const mz_uint32 *)(d->m_dict +
(probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
0xFFFFFF) == first_trigram)) {
const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
mz_uint32 probe_len = 32;
do {
} while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(TDEFL_READ_UNALIGNED_WORD(++p) ==
TDEFL_READ_UNALIGNED_WORD(++q)) &&
(--probe_len > 0));
cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
(mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
if (!probe_len)
cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U))) {
cur_match_len = 1;
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
} else {
mz_uint32 s0, s1;
cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 1) &&
(cur_match_dist <= TDEFL_LZ_DICT_SIZE));
cur_match_dist--;
pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
*(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
pLZ_code_buf += 3;
*pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
TDEFL_MIN_MATCH_LEN]]++;
}
} else {
*pLZ_code_buf++ = (mz_uint8)first_trigram;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
d->m_huff_count[0][(mz_uint8)first_trigram]++;
}
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
total_lz_bytes += cur_match_len;
lookahead_pos += cur_match_len;
dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
MZ_ASSERT(lookahead_size >= cur_match_len);
lookahead_size -= cur_match_len;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
while (lookahead_size) {
mz_uint8 lit = d->m_dict[cur_pos];
total_lz_bytes++;
*pLZ_code_buf++ = lit;
*pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
if (--num_flags_left == 0) {
num_flags_left = 8;
pLZ_flags = pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
lookahead_pos++;
dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
lookahead_size--;
if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
int n;
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
total_lz_bytes = d->m_total_lz_bytes;
pLZ_code_buf = d->m_pLZ_code_buf;
pLZ_flags = d->m_pLZ_flags;
num_flags_left = d->m_num_flags_left;
}
}
}
d->m_lookahead_pos = lookahead_pos;
d->m_lookahead_size = lookahead_size;
d->m_dict_size = dict_size;
d->m_total_lz_bytes = total_lz_bytes;
d->m_pLZ_code_buf = pLZ_code_buf;
d->m_pLZ_flags = pLZ_flags;
d->m_num_flags_left = num_flags_left;
return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
mz_uint8 lit) {
d->m_total_lz_bytes++;
*d->m_pLZ_code_buf++ = lit;
*d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
d->m_huff_count[0][lit]++;
}
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
mz_uint match_len,
mz_uint match_dist) {
mz_uint32 s0, s1;
MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
(match_dist <= TDEFL_LZ_DICT_SIZE));
d->m_total_lz_bytes += match_len;
d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
match_dist -= 1;
d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
d->m_pLZ_code_buf += 3;
*d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
if (--d->m_num_flags_left == 0) {
d->m_num_flags_left = 8;
d->m_pLZ_flags = d->m_pLZ_code_buf++;
}
s0 = s_tdefl_small_dist_sym[match_dist & 511];
s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
if (match_len >= TDEFL_MIN_MATCH_LEN)
d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
const mz_uint8 *pSrc = d->m_pSrc;
size_t src_buf_left = d->m_src_buf_left;
tdefl_flush flush = d->m_flush;
while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
// Update dictionary and hash chains. Keeps the lookahead size equal to
// TDEFL_MAX_MATCH_LEN.
if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK,
ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
src_buf_left -= num_bytes_to_process;
d->m_lookahead_size += num_bytes_to_process;
while (pSrc != pSrc_end) {
mz_uint8 c = *pSrc++;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
ins_pos++;
}
} else {
while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
mz_uint8 c = *pSrc++;
mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
TDEFL_LZ_DICT_SIZE_MASK;
src_buf_left--;
d->m_dict[dst_pos] = c;
if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
<< (TDEFL_LZ_HASH_SHIFT * 2)) ^
(d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
<< TDEFL_LZ_HASH_SHIFT) ^
c) &
(TDEFL_LZ_HASH_SIZE - 1);
d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
d->m_hash[hash] = (mz_uint16)(ins_pos);
}
}
}
d->m_dict_size =
MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
// Simple lazy/greedy parsing state machine.
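// Lazy evaluation: when a match was deferred on the previous byte
// (m_saved_match_len != 0), it is compared below against the match starting
// at the current byte - if the new match is longer, the previous byte is
// emitted as a literal and the longer match is kept (or deferred again);
// otherwise the saved match wins.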
len_to_move = 1;
cur_match_dist = 0;
cur_match_len =
d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
cur_match_len = 0;
while (cur_match_len < d->m_lookahead_size) {
if (d->m_dict[cur_pos + cur_match_len] != c) break;
cur_match_len++;
}
if (cur_match_len < TDEFL_MIN_MATCH_LEN)
cur_match_len = 0;
else
cur_match_dist = 1;
}
} else {
tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
d->m_lookahead_size, &cur_match_dist, &cur_match_len);
}
if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
(cur_match_dist >= 8U * 1024U)) ||
(cur_pos == cur_match_dist) ||
((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
cur_match_dist = cur_match_len = 0;
}
if (d->m_saved_match_len) {
if (cur_match_len > d->m_saved_match_len) {
tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
if (cur_match_len >= 128) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
d->m_saved_match_len = 0;
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[cur_pos];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
} else {
tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
len_to_move = d->m_saved_match_len - 1;
d->m_saved_match_len = 0;
}
} else if (!cur_match_dist)
tdefl_record_literal(d,
d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
(cur_match_len >= 128)) {
tdefl_record_match(d, cur_match_len, cur_match_dist);
len_to_move = cur_match_len;
} else {
d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
d->m_saved_match_dist = cur_match_dist;
d->m_saved_match_len = cur_match_len;
}
// Move the lookahead forward by len_to_move bytes.
d->m_lookahead_pos += len_to_move;
MZ_ASSERT(d->m_lookahead_size >= len_to_move);
d->m_lookahead_size -= len_to_move;
d->m_dict_size =
MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
// Check if it's time to flush the current LZ codes to the internal output
// buffer.
if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
((d->m_total_lz_bytes > 31 * 1024) &&
(((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
d->m_total_lz_bytes) ||
(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
int n;
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
if ((n = tdefl_flush_block(d, 0)) != 0)
return (n < 0) ? MZ_FALSE : MZ_TRUE;
}
}
d->m_pSrc = pSrc;
d->m_src_buf_left = src_buf_left;
return MZ_TRUE;
}
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
if (d->m_pIn_buf_size) {
*d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
}
if (d->m_pOut_buf_size) {
size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
d->m_output_flush_remaining);
memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
d->m_output_buf + d->m_output_flush_ofs, n);
d->m_output_flush_ofs += (mz_uint)n;
d->m_output_flush_remaining -= (mz_uint)n;
d->m_out_buf_ofs += n;
*d->m_pOut_buf_size = d->m_out_buf_ofs;
}
return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE
: TDEFL_STATUS_OKAY;
}
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush) {
if (!d) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return TDEFL_STATUS_BAD_PARAM;
}
d->m_pIn_buf = pIn_buf;
d->m_pIn_buf_size = pIn_buf_size;
d->m_pOut_buf = pOut_buf;
d->m_pOut_buf_size = pOut_buf_size;
d->m_pSrc = (const mz_uint8 *)(pIn_buf);
d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
d->m_out_buf_ofs = 0;
d->m_flush = flush;
if (((d->m_pPut_buf_func != NULL) ==
((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
(d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
(d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
(pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
(pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
if (pIn_buf_size) *pIn_buf_size = 0;
if (pOut_buf_size) *pOut_buf_size = 0;
return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
}
d->m_wants_to_finish |= (flush == TDEFL_FINISH);
if ((d->m_output_flush_remaining) || (d->m_finished))
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
TDEFL_RLE_MATCHES)) == 0)) {
if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
} else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
{
if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
}
if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
(pIn_buf))
d->m_adler32 =
(mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
d->m_pSrc - (const mz_uint8 *)pIn_buf);
if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
(!d->m_output_flush_remaining)) {
if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
d->m_finished = (flush == TDEFL_FINISH);
if (flush == TDEFL_FULL_FLUSH) {
MZ_CLEAR_OBJ(d->m_hash);
MZ_CLEAR_OBJ(d->m_next);
d->m_dict_size = 0;
}
}
return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
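// Illustrative sketch (not part of miniz): driving tdefl_compress() in its
// buffer-to-buffer mode, draining a small output buffer as it fills. emit()
// is a hypothetical consumer of the compressed bytes; error handling is
// minimal.
#if 0
static mz_bool example_stream_compress(const mz_uint8 *pSrc, size_t src_len) {
  mz_uint8 out[4096];
  tdefl_compressor *d = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!d) return MZ_FALSE;
  tdefl_init(d, NULL, NULL, TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER);
  for (;;) {
    // On entry the sizes are the available amounts; on return they hold what
    // was actually consumed/produced.
    size_t in_bytes = src_len, out_bytes = sizeof(out);
    tdefl_status st =
        tdefl_compress(d, pSrc, &in_bytes, out, &out_bytes, TDEFL_FINISH);
    pSrc += in_bytes;
    src_len -= in_bytes;
    emit(out, out_bytes);  // hypothetical sink for the compressed data
    if (st == TDEFL_STATUS_DONE) { MZ_FREE(d); return MZ_TRUE; }
    if (st != TDEFL_STATUS_OKAY) { MZ_FREE(d); return MZ_FALSE; }
  }
}
#endif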
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush) {
MZ_ASSERT(d->m_pPut_buf_func);
return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}
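// Illustrative sketch (not part of miniz): the push model that
// tdefl_compress_buffer() requires - output is delivered through the
// tdefl_put_buf_func_ptr callback installed by tdefl_init(), so the caller
// never manages an output buffer. my_put_buf and the FILE* sink are
// hypothetical.
#if 0
static mz_bool my_put_buf(const void *pBuf, int len, void *pUser) {
  return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}
static mz_bool example_push_compress(const void *pData, size_t len, FILE *pOut) {
  tdefl_compressor *d = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  mz_bool ok =
      (d != NULL) &&
      (tdefl_init(d, my_put_buf, pOut, TDEFL_DEFAULT_MAX_PROBES) ==
       TDEFL_STATUS_OKAY) &&
      (tdefl_compress_buffer(d, pData, len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
  MZ_FREE(d);
  return ok;
}
#endif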
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
d->m_pPut_buf_func = pPut_buf_func;
d->m_pPut_buf_user = pPut_buf_user;
d->m_flags = (mz_uint)(flags);
d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
d->m_pLZ_flags = d->m_lz_code_buf;
d->m_num_flags_left = 8;
d->m_pOutput_buf = d->m_output_buf;
d->m_pOutput_buf_end = d->m_output_buf;
d->m_prev_return_status = TDEFL_STATUS_OKAY;
d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
d->m_adler32 = 1;
d->m_pIn_buf = NULL;
d->m_pOut_buf = NULL;
d->m_pIn_buf_size = NULL;
d->m_pOut_buf_size = NULL;
d->m_flush = TDEFL_NO_FLUSH;
d->m_pSrc = NULL;
d->m_src_buf_left = 0;
d->m_out_buf_ofs = 0;
memset(&d->m_huff_count[0][0], 0,
sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
memset(&d->m_huff_count[1][0], 0,
sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
return TDEFL_STATUS_OKAY;
}
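// Note: tdefl_compressor is a large object (the dictionary, hash tables and
// output buffer put it in the hundreds-of-KB range), so callers typically
// heap allocate it rather than placing it on the stack - as
// tdefl_compress_mem_to_output() does below.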
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
return d->m_prev_return_status;
}
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags) {
tdefl_compressor *pComp;
mz_bool succeeded;
if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
if (!pComp) return MZ_FALSE;
succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
TDEFL_STATUS_OKAY);
succeeded =
succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
TDEFL_STATUS_DONE);
MZ_FREE(pComp);
return succeeded;
}
typedef struct {
size_t m_size, m_capacity;
mz_uint8 *m_pBuf;
mz_bool m_expandable;
} tdefl_output_buffer;
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
void *pUser) {
tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
size_t new_size = p->m_size + len;
if (new_size > p->m_capacity) {
size_t new_capacity = p->m_capacity;
mz_uint8 *pNew_buf;
if (!p->m_expandable) return MZ_FALSE;
do {
new_capacity = MZ_MAX(128U, new_capacity << 1U);
} while (new_size > new_capacity);
pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
if (!pNew_buf) return MZ_FALSE;
p->m_pBuf = pNew_buf;
p->m_capacity = new_capacity;
}
memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
p->m_size = new_size;
return MZ_TRUE;
}
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_len)
  return NULL;
else
  *pOut_len = 0;
out_buf.m_expandable = MZ_TRUE;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return NULL;
*pOut_len = out_buf.m_size;
return out_buf.m_pBuf;
}
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tdefl_output_buffer out_buf;
MZ_CLEAR_OBJ(out_buf);
if (!pOut_buf) return 0;
out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
out_buf.m_capacity = out_buf_len;
if (!tdefl_compress_mem_to_output(
pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
return 0;
return out_buf.m_size;
}
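// Illustrative sketch: one-shot, fixed-buffer compression with the helper
// above. Note the destination buffer comes first; a zero return means failure
// (bad parameters or dst too small). src/dst are hypothetical caller buffers.
#if 0
static size_t example_compress_once(const void *src, size_t src_len, void *dst,
                                    size_t dst_len) {
  return tdefl_compress_mem_to_mem(
      dst, dst_len, src, src_len,
      TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
}
#endif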
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy) {
mz_uint comp_flags =
s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
if (!level)
comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
else if (strategy == MZ_FILTERED)
comp_flags |= TDEFL_FILTER_MATCHES;
else if (strategy == MZ_HUFFMAN_ONLY)
comp_flags &= ~TDEFL_MAX_PROBES_MASK;
else if (strategy == MZ_FIXED)
comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
else if (strategy == MZ_RLE)
comp_flags |= TDEFL_RLE_MATCHES;
return comp_flags;
}
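// Illustrative fragment: derive tdefl flags from familiar zlib-style
// parameters (here level 9 with a 15-bit window requesting a zlib header and
// the default strategy), then hand the result to any tdefl entry point.
#if 0
mz_uint flags = tdefl_create_comp_flags_from_zip_params(
    9, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
#endif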
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip) {
// Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
// defined.
static const mz_uint s_tdefl_png_num_probes[11] = {
0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
tdefl_compressor *pComp =
(tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
tdefl_output_buffer out_buf;
int i, bpl = w * num_chans, y, z;
mz_uint32 c;
*pLen_out = 0;
if (!pComp) return NULL;
MZ_CLEAR_OBJ(out_buf);
out_buf.m_expandable = MZ_TRUE;
out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
MZ_FREE(pComp);
return NULL;
}
// write dummy header
for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
// compress image data
tdefl_init(
pComp, tdefl_output_buffer_putter, &out_buf,
s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
for (y = 0; y < h; ++y) {
tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
tdefl_compress_buffer(pComp,
(mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
bpl, TDEFL_NO_FLUSH);
}
if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
TDEFL_STATUS_DONE) {
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
// write real header
*pLen_out = out_buf.m_size - 41;
{
static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
mz_uint8 pnghdr[41] = {
    0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,  // PNG signature
    0x00, 0x00, 0x00, 0x0d,                          // IHDR chunk length (13)
    0x49, 0x48, 0x44, 0x52,                          // "IHDR"
    0, 0, (mz_uint8)(w >> 8), (mz_uint8)w,           // width, big endian
    0, 0, (mz_uint8)(h >> 8), (mz_uint8)h,           // height, big endian
    8, chans[num_chans], 0, 0, 0,                    // bit depth, color type,
                                                     // compression/filter/interlace
    0, 0, 0, 0,                                      // IHDR CRC-32, patched in below
    (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
    (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, // IDAT chunk length
    0x49, 0x44, 0x41, 0x54};                         // "IDAT"
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
for (i = 0; i < 4; ++i, c <<= 8)
((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
memcpy(out_buf.m_pBuf, pnghdr, 41);
}
// write footer (IDAT CRC-32, followed by IEND chunk)
if (!tdefl_output_buffer_putter(
"\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
*pLen_out = 0;
MZ_FREE(pComp);
MZ_FREE(out_buf.m_pBuf);
return NULL;
}
c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
*pLen_out + 4);
for (i = 0; i < 4; ++i, c <<= 8)
(out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
// compute final size of file, grab compressed data buffer and return
*pLen_out += 57;
MZ_FREE(pComp);
return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out) {
// Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
// can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were
// #defined out).
return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
pLen_out, 6, MZ_FALSE);
}
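// Illustrative sketch: dump a raw 8-bit RGB image as a .png file on disk.
// pixels, w and h are hypothetical caller data; the returned buffer is heap
// allocated and released with mz_free().
#if 0
static void example_save_png(const void *pixels, int w, int h) {
  size_t png_len;
  void *png =
      tdefl_write_image_to_png_file_in_memory(pixels, w, h, 3, &png_len);
  if (png) {
    FILE *f = fopen("out.png", "wb");
    if (f) {
      fwrite(png, 1, png_len, f);
      fclose(f);
    }
    mz_free(png);
  }
}
#endif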
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
FILE *pFile = NULL;
fopen_s(&pFile, pFilename, pMode);
return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
FILE *pFile = NULL;
if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
// ZIP archive identifiers and record sizes
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
// Central directory header record offsets
MZ_ZIP_CDH_SIG_OFS = 0,
MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
MZ_ZIP_CDH_METHOD_OFS = 10,
MZ_ZIP_CDH_FILE_TIME_OFS = 12,
MZ_ZIP_CDH_FILE_DATE_OFS = 14,
MZ_ZIP_CDH_CRC32_OFS = 16,
MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
MZ_ZIP_CDH_DISK_START_OFS = 34,
MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
// Local directory header offsets
MZ_ZIP_LDH_SIG_OFS = 0,
MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
MZ_ZIP_LDH_METHOD_OFS = 8,
MZ_ZIP_LDH_FILE_TIME_OFS = 10,
MZ_ZIP_LDH_FILE_DATE_OFS = 12,
MZ_ZIP_LDH_CRC32_OFS = 14,
MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
// End of central directory offsets
MZ_ZIP_ECDH_SIG_OFS = 0,
MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
typedef struct {
void *m_p;
size_t m_size, m_capacity;
mz_uint m_element_size;
} mz_zip_array;
struct mz_zip_internal_state_tag {
mz_zip_array m_central_dir;
mz_zip_array m_central_dir_offsets;
mz_zip_array m_sorted_central_dir_offsets;
MZ_FILE *m_pFile;
void *m_pMem;
size_t m_mem_size;
size_t m_mem_capacity;
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
mz_zip_array *pArray) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
memset(pArray, 0, sizeof(mz_zip_array));
}
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t min_new_capacity,
mz_uint growing) {
void *pNew_p;
size_t new_capacity = min_new_capacity;
MZ_ASSERT(pArray->m_element_size);
if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
if (growing) {
new_capacity = MZ_MAX(1, pArray->m_capacity);
while (new_capacity < min_new_capacity) new_capacity *= 2;
}
if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
pArray->m_element_size, new_capacity)))
return MZ_FALSE;
pArray->m_p = pNew_p;
pArray->m_capacity = new_capacity;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_capacity,
mz_uint growing) {
if (new_capacity > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
return MZ_FALSE;
}
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t new_size,
mz_uint growing) {
if (new_size > pArray->m_capacity) {
if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
return MZ_FALSE;
}
pArray->m_size = new_size;
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
mz_zip_array *pArray,
size_t n) {
return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
mz_zip_array *pArray,
const void *pElements,
size_t n) {
size_t orig_size = pArray->m_size;
if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
return MZ_FALSE;
memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
pElements, n * pArray->m_element_size);
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
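// DOS date/time packing: date = (year - 1980) << 9 | month << 5 | day,
// time = hour << 11 | minute << 5 | (seconds / 2). The two conversions below
// just shift and mask these fields into and out of a struct tm.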
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_isdst = -1;
tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
tm.tm_mon = ((dos_date >> 5) & 15) - 1;
tm.tm_mday = dos_date & 31;
tm.tm_hour = (dos_time >> 11) & 31;
tm.tm_min = (dos_time >> 5) & 63;
tm.tm_sec = (dos_time << 1) & 62;
return mktime(&tm);
}
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
struct tm tm_struct;
struct tm *tm = &tm_struct;
errno_t err = localtime_s(tm, &time);
if (err) {
*pDOS_date = 0;
*pDOS_time = 0;
return;
}
#else
struct tm *tm = localtime(&time);
#endif
*pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
((tm->tm_sec) >> 1));
*pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
mz_uint16 *pDOS_time,
mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
(void)pFilename;
*pDOS_date = *pDOS_time = 0;
#else
struct MZ_FILE_STAT_STRUCT file_stat;
// On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
// bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
time_t modified_time) {
struct utimbuf t;
t.actime = access_time;
t.modtime = modified_time;
return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
mz_uint32 flags) {
(void)flags;
if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_READING;
pZip->m_archive_size = 0;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets,
mz_uint l_index, mz_uint r_index) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (l_len < r_len) : (l < r);
}
#define MZ_SWAP_UINT32(a, b) \
do { \
mz_uint32 t = a; \
a = b; \
b = t; \
} \
MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
mz_zip_archive *pZip) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
int start = (size - 2) >> 1, end;
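// First pass: heapify - sift every non-leaf node down so the
// lexicographically largest filename rises to the root.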
while (start >= 0) {
int child, root = start;
for (;;) {
if ((child = (root << 1) + 1) >= size) break;
child +=
(((child + 1) < size) &&
(mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1])));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
start--;
}
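// Second pass: repeatedly swap the root (largest remaining filename) to the
// end of the array and re-sift, leaving the indices sorted ascending.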
end = size - 1;
while (end > 0) {
int child, root = 0;
MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
for (;;) {
if ((child = (root << 1) + 1) >= end) break;
child +=
(((child + 1) < end) &&
mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[child], pIndices[child + 1]));
if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
pIndices[root], pIndices[child]))
break;
MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
root = child;
}
end--;
}
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity check - reject archives which are too small to even contain an
// end of central directory record.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags) {
if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size)
? 0
: (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags) {
if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
pZip->m_archive_size = size;
pZip->m_pRead = mz_zip_mem_read_func;
pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
pZip->m_pState->m_pMem = (void *)pMem;
#endif
pZip->m_pState->m_mem_size = size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags) {
mz_uint64 file_size;
MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
if (!pFile) return MZ_FALSE;
if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
file_size = MZ_FTELL64(pFile);
if (!mz_zip_reader_init_internal(pZip, flags)) {
MZ_FCLOSE(pFile);
return MZ_FALSE;
}
pZip->m_pRead = mz_zip_file_read_func;
pZip->m_pIO_opaque = pZip;
pZip->m_pState->m_pFile = pFile;
pZip->m_archive_size = file_size;
if (!mz_zip_reader_read_central_dir(pZip, flags)) {
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
return MZ_TRUE;
}
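// Illustrative sketch: open an archive on disk and pull one member out to a
// heap buffer. "archive.zip" and "member.txt" are hypothetical names; the
// extracted buffer is released through the archive's own free callback.
#if 0
static void example_read_zip(void) {
  mz_zip_archive zip;
  size_t len;
  void *p;
  memset(&zip, 0, sizeof(zip));
  if (!mz_zip_reader_init_file(&zip, "archive.zip", 0)) return;
  p = mz_zip_reader_extract_file_to_heap(&zip, "member.txt", &len, 0);
  if (p) zip.m_pFree(zip.m_pAlloc_opaque, p);
  mz_zip_reader_end(&zip);
}
#endif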
#endif // #ifndef MINIZ_NO_STDIO
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
return pZip ? pZip->m_total_files : 0;
}
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
mz_zip_archive *pZip, mz_uint file_index) {
if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return NULL;
return &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
}
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint m_bit_flag;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
return (m_bit_flag & 1);
}
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index) {
mz_uint filename_len, external_attr;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) return MZ_FALSE;
// First see if the filename ends with a '/' character.
filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_len) {
if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
return MZ_TRUE;
}
// Bugfix: This code was also checking if the internal attribute was non-zero,
// which wasn't correct.
// Most/all zip writers (hopefully) set DOS file/directory attributes in the
// low 16-bits, so check for the DOS directory flag and ignore the source OS
// ID in the created by field.
// FIXME: Remove this check? Is it necessary - we already check the filename.
external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
if ((external_attr & 0x10) != 0) return MZ_TRUE;
return MZ_FALSE;
}
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if ((!p) || (!pStat)) return MZ_FALSE;
// Unpack the central directory record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
pStat->m_time =
mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
// Copy as much of the filename and comment as possible.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
n);
pStat->m_comment[n] = '\0';
return MZ_TRUE;
}
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size) {
mz_uint n;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
if (!p) {
if (filename_buf_size) pFilename[0] = '\0';
return 0;
}
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
if (filename_buf_size) {
n = MZ_MIN(n, filename_buf_size - 1);
memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pFilename[n] = '\0';
}
return n + 1;
}
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
const char *pB,
mz_uint len,
mz_uint flags) {
mz_uint i;
if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
for (i = 0; i < len; ++i)
if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
return MZ_TRUE;
}
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
const mz_zip_array *pCentral_dir_array,
const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
mz_uint r_len) {
const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
pCentral_dir_array, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
l_index)),
*pE;
mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
mz_uint8 l = 0, r = 0;
pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
pE = pL + MZ_MIN(l_len, r_len);
while (pL < pE) {
if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
pL++;
pR++;
}
return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState = pZip->m_pState;
const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
const mz_zip_array *pCentral_dir = &pState->m_central_dir;
mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
const int size = pZip->m_total_files;
const mz_uint filename_len = (mz_uint)strlen(pFilename);
int l = 0, h = size - 1;
while (l <= h) {
int m = (l + h) >> 1, file_index = pIndices[m],
comp =
mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
file_index, pFilename, filename_len);
if (!comp)
return file_index;
else if (comp < 0)
l = m + 1;
else
h = m - 1;
}
return -1;
}
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags) {
mz_uint file_index;
size_t name_len, comment_len;
if ((!pZip) || (!pZip->m_pState) || (!pName) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return -1;
if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
(!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
return mz_zip_reader_locate_file_binary_search(pZip, pName);
name_len = strlen(pName);
if (name_len > 0xFFFF) return -1;
comment_len = pComment ? strlen(pComment) : 0;
if (comment_len > 0xFFFF) return -1;
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
&pZip->m_pState->m_central_dir, mz_uint8,
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
file_index));
mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
const char *pFilename =
(const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
if (filename_len < name_len) continue;
if (comment_len) {
mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
file_comment_len =
MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
const char *pFile_comment = pFilename + filename_len + file_extra_len;
if ((file_comment_len != comment_len) ||
(!mz_zip_reader_string_equal(pComment, pFile_comment,
file_comment_len, flags)))
continue;
}
if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
int ofs = filename_len - 1;
do {
if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
(pFilename[ofs] == ':'))
break;
} while (--ofs >= 0);
ofs++;
pFilename += ofs;
filename_len -= ofs;
}
if ((filename_len == name_len) &&
(mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
return file_index;
}
return -1;
}
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size) {
int status = TINFL_STATUS_DONE;
mz_uint64 needed_size, cur_file_ofs, comp_remaining,
out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
mz_zip_archive_file_stat file_stat;
void *pRead_buf;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
tinfl_decompressor inflator;
if ((buf_size) && (!pBuf)) return MZ_FALSE;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
: file_stat.m_uncomp_size;
if (buf_size < needed_size) return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
(size_t)needed_size) != needed_size)
return MZ_FALSE;
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}
// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);
if (pZip->m_pState->m_pMem) {
// Read directly from the archive in memory.
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else if (pUser_read_buf) {
// Use a user provided read buffer.
if (!user_read_buf_size) return MZ_FALSE;
pRead_buf = (mz_uint8 *)pUser_read_buf;
read_buf_size = user_read_buf_size;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
} else {
// Temporarily allocate a read buffer.
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
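// The (0, expr) comma form silences MSVC's "conditional expression is
// constant" warning on the sizeof comparison.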
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(read_buf_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
return MZ_FALSE;
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
do {
size_t in_buf_size,
out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
(comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
if (status == TINFL_STATUS_DONE) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
(size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, pUser_read_buf,
user_read_buf_size);
}
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags) {
return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
flags, NULL, 0);
}
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags) {
return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
buf_size, flags, NULL, 0);
}
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags) {
mz_uint64 comp_size, uncomp_size, alloc_size;
const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
void *pBuf;
if (pSize) *pSize = 0;
if (!p) return NULL;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
return NULL;
if (NULL ==
(pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
return NULL;
if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
flags)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return NULL;
}
if (pSize) *pSize = (size_t)alloc_size;
return pBuf;
}
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) {
  if (pSize) *pSize = 0;
  return NULL;
}
return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int status = TINFL_STATUS_DONE;
mz_uint file_crc32 = MZ_CRC32_INIT;
mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
out_buf_ofs = 0, cur_file_ofs;
mz_zip_archive_file_stat file_stat;
void *pRead_buf = NULL;
void *pWrite_buf = NULL;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
// Empty file, or a directory (but not always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size) return MZ_TRUE;
// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
(file_stat.m_method != MZ_DEFLATED))
return MZ_FALSE;
// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
return MZ_FALSE;
// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
read_buf_size = read_buf_avail = file_stat.m_comp_size;
comp_remaining = 0;
} else {
read_buf_size =
MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
(size_t)read_buf_size)))
return MZ_FALSE;
read_buf_avail = 0;
comp_remaining = file_stat.m_comp_size;
}
if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
// The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) &&
(file_stat.m_comp_size > 0xFFFFFFFF))
#endif
return MZ_FALSE;
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
status = TINFL_STATUS_FAILED;
else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
(size_t)file_stat.m_comp_size);
cur_file_ofs += file_stat.m_comp_size;
out_buf_ofs += file_stat.m_comp_size;
comp_remaining = 0;
} else {
while (comp_remaining) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
file_crc32 = (mz_uint32)mz_crc32(
file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
out_buf_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
}
}
} else {
tinfl_decompressor inflator;
tinfl_init(&inflator);
if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
TINFL_LZ_DICT_SIZE)))
status = TINFL_STATUS_FAILED;
else {
do {
mz_uint8 *pWrite_buf_cur =
(mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
size_t in_buf_size,
out_buf_size =
TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
(size_t)read_buf_avail) != read_buf_avail) {
status = TINFL_STATUS_FAILED;
break;
}
cur_file_ofs += read_buf_avail;
comp_remaining -= read_buf_avail;
read_buf_ofs = 0;
}
in_buf_size = (size_t)read_buf_avail;
status = tinfl_decompress(
&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
(mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
if (out_buf_size) {
if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
out_buf_size) {
status = TINFL_STATUS_FAILED;
break;
}
file_crc32 =
(mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
status = TINFL_STATUS_FAILED;
break;
}
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
(status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}
if ((status == TINFL_STATUS_DONE) &&
(!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
// Make sure the entire file was decompressed, and check its CRC.
if ((out_buf_ofs != file_stat.m_uncomp_size) ||
(file_crc32 != file_stat.m_crc32))
status = TINFL_STATUS_FAILED;
}
if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
return status == TINFL_STATUS_DONE;
}
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags) {
int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
flags);
}
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
const void *pBuf, size_t n) {
(void)ofs;
return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename,
mz_uint flags) {
mz_bool status;
mz_zip_archive_file_stat file_stat;
MZ_FILE *pFile;
if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
pFile = MZ_FOPEN(pDst_filename, "wb");
if (!pFile) return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
pZip, file_index, mz_zip_file_write_callback, pFile, flags);
if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
if (status)
mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
if (pZip->m_pState) {
mz_zip_internal_state *pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
}
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags) {
int file_index =
mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
if (file_index < 0) return MZ_FALSE;
return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif
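// Usage sketch (illustrative, not part of the library; the archive and entry
// names are hypothetical): a minimal reader lifecycle using the APIs above.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "archive.zip", 0)) {
//     // Locate + extract one entry by name straight to disk.
//     mz_zip_reader_extract_file_to_file(&zip, "doc.txt", "doc_out.txt", 0);
//     mz_zip_reader_end(&zip);  // always release the reader's state
//   }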
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
p[0] = (mz_uint8)v;
p[1] = (mz_uint8)(v >> 8);
p[2] = (mz_uint8)(v >> 16);
p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
return MZ_FALSE;
if (pZip->m_file_offset_alignment) {
    // Ensure the user-specified file offset alignment is a power of 2.
if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
return MZ_FALSE;
}
if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;
if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
sizeof(mz_uint32));
return MZ_TRUE;
}
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
if ((!n) ||
((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
if ((!n) ||
((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
return 0;
if (new_size > pState->m_mem_capacity) {
void *pNew_block;
size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
while (new_capacity < new_size) new_capacity *= 2;
if (NULL == (pNew_block = pZip->m_pRealloc(
pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
return 0;
pState->m_pMem = pNew_block;
pState->m_mem_capacity = new_capacity;
}
memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
pState->m_mem_size = (size_t)new_size;
return n;
}
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size) {
pZip->m_pWrite = mz_zip_heap_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
size_to_reserve_at_beginning))) {
if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_mem_capacity = initial_allocation_size;
}
return MZ_TRUE;
}
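// Usage sketch (illustrative; entry name and payload are hypothetical):
// building an archive entirely in memory with the heap writer above. With the
// default allocators the buffer handed back by
// mz_zip_writer_finalize_heap_archive() comes from realloc(), so plain free()
// releases it; with custom callbacks, use the matching free function.
//
//   mz_zip_archive zip;
//   void *pZip_data = NULL;
//   size_t zip_size = 0;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_writer_init_heap(&zip, 0, 64 * 1024)) {
//     const char *msg = "hello";
//     mz_zip_writer_add_mem(&zip, "hello.txt", msg, strlen(msg),
//                           MZ_DEFAULT_LEVEL);
//     mz_zip_writer_finalize_heap_archive(&zip, &pZip_data, &zip_size);
//     mz_zip_writer_end(&zip);
//     // ... use zip_size bytes at pZip_data, then free(pZip_data) ...
//   }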
#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n) {
mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
if (((mz_int64)file_ofs < 0) ||
(((cur_ofs != (mz_int64)file_ofs)) &&
(MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
return 0;
return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning) {
MZ_FILE *pFile;
pZip->m_pWrite = mz_zip_file_write_func;
pZip->m_pIO_opaque = pZip;
if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
pZip->m_pState->m_pFile = pFile;
if (size_to_reserve_at_beginning) {
mz_uint64 cur_ofs = 0;
char buf[4096];
MZ_CLEAR_OBJ(buf);
do {
size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
mz_zip_writer_end(pZip);
return MZ_FALSE;
}
cur_ofs += n;
size_to_reserve_at_beginning -= n;
} while (size_to_reserve_at_beginning);
}
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // maximum size.
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
pState = pZip->m_pState;
if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename;  // unused when MINIZ_NO_STDIO is defined
return MZ_FALSE;
#else
// Archive is being read from stdio - try to reopen as writable.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
if (!pFilename) return MZ_FALSE;
pZip->m_pWrite = mz_zip_file_write_func;
if (NULL ==
(pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
// The mz_zip_archive is now in a bogus state because pState->m_pFile is
// NULL, so just close it.
mz_zip_reader_end(pZip);
return MZ_FALSE;
}
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
// Archive lives in a memory block. Assume it's from the heap that we can
// resize using the realloc callback.
if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
pState->m_mem_capacity = pState->m_mem_size;
pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
return MZ_FALSE;
// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs;
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_central_directory_file_ofs = 0;
return MZ_TRUE;
}
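// Usage sketch (illustrative; "existing.zip" and the entry are hypothetical):
// turning an open reader into a writer to append in place, mirroring what
// mz_zip_add_mem_to_archive_file_in_place() does further below.
//
//   mz_zip_archive zip;
//   memset(&zip, 0, sizeof(zip));
//   if (mz_zip_reader_init_file(&zip, "existing.zip", 0)) {
//     if (!mz_zip_writer_init_from_reader(&zip, "existing.zip")) {
//       mz_zip_reader_end(&zip);  // conversion failed; still in reading mode
//     } else {
//       mz_zip_writer_add_mem(&zip, "extra.txt", "x", 1, MZ_DEFAULT_LEVEL);
//       mz_zip_writer_finalize_archive(&zip);  // writes a fresh central dir
//       mz_zip_writer_end(&zip);
//     }
//   }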
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags) {
return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
level_and_flags, 0, 0);
}
typedef struct {
mz_zip_archive *m_pZip;
mz_uint64 m_cur_archive_file_ofs;
mz_uint64 m_comp_size;
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_local_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_create_central_dir_header(
mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
(void)pZip;
memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
return MZ_TRUE;
}
static mz_bool mz_zip_writer_add_to_central_dir(
mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
const void *pExtra, mz_uint16 extra_size, const void *pComment,
mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
mz_uint32 ext_attributes) {
mz_zip_internal_state *pState = pZip->m_pState;
mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
size_t orig_central_dir_size = pState->m_central_dir.m_size;
mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
// No zip64 support yet
if ((local_header_ofs > 0xFFFFFFFF) ||
(((mz_uint64)pState->m_central_dir.m_size +
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
comment_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_central_dir_header(
pZip, central_dir_header, filename_size, extra_size, comment_size,
uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
dos_date, local_header_ofs, ext_attributes))
return MZ_FALSE;
if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
filename_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
extra_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
comment_size)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
// Try to push the central directory array back into its original state.
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
return MZ_TRUE;
}
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
// Basic ZIP archive filename validity checks: Valid filenames cannot start
// with a forward slash, cannot contain a drive letter, and cannot use
// DOS-style backward slashes.
if (*pArchive_name == '/') return MZ_FALSE;
while (*pArchive_name) {
if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
pArchive_name++;
}
return MZ_TRUE;
}
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
mz_zip_archive *pZip) {
mz_uint32 n;
if (!pZip->m_file_offset_alignment) return 0;
n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
return (pZip->m_file_offset_alignment - n) &
(pZip->m_file_offset_alignment - 1);
}
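// Worked example for the padding computation above: with
// m_file_offset_alignment == 64 and m_archive_size == 100,
// n = 100 & 63 = 36 and the result is (64 - 36) & 63 = 28, i.e. 28 padding
// bytes bring the next local header to offset 128. When the size is already
// aligned (n == 0), the final "& 63" turns the would-be 64 into 0.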
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
mz_uint64 cur_file_ofs, mz_uint32 n) {
char buf[4096];
memset(buf, 0, MZ_MIN(sizeof(buf), n));
while (n) {
mz_uint32 s = MZ_MIN(sizeof(buf), n);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
return MZ_FALSE;
cur_file_ofs += s;
n -= s;
}
return MZ_TRUE;
}
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32) {
mz_uint16 method = 0, dos_time = 0, dos_date = 0;
mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
tdefl_compressor *pComp = NULL;
mz_bool store_data_uncompressed;
mz_zip_internal_state *pState;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
store_data_uncompressed =
((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
(!pArchive_name) || ((comment_size) && (!pComment)) ||
(pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
pState = pZip->m_pState;
if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
return MZ_FALSE;
// No zip64 support yet
if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
{
time_t cur_time;
time(&cur_time);
mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
}
#endif // #ifndef MINIZ_NO_TIME
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
// Set DOS Subdirectory attribute bit.
ext_attributes |= 0x10;
// Subdirectories cannot contain data.
if ((buf_size) || (uncomp_size)) return MZ_FALSE;
}
// Try to do any allocations before writing to the archive, so if an
// allocation fails the file remains unmodified. (A good idea if we're doing
// an in-place modification.)
if ((!mz_zip_array_ensure_room(
pZip, &pState->m_central_dir,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
(!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
return MZ_FALSE;
if ((!store_data_uncompressed) && (buf_size)) {
if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
return MZ_FALSE;
}
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
uncomp_crc32 =
(mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
uncomp_size = buf_size;
if (uncomp_size <= 3) {
level = 0;
store_data_uncompressed = MZ_TRUE;
}
}
if (store_data_uncompressed) {
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
buf_size) != buf_size) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
cur_archive_file_ofs += buf_size;
comp_size = buf_size;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
} else if (buf_size) {
mz_zip_writer_add_state state;
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) ||
(tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
TDEFL_STATUS_DONE)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pComp = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
comp_size = 0;
size_t archive_name_size;
mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
MZ_FILE *pSrc_file = NULL;
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
level = level_and_flags & 0xF;
if ((!pZip) || (!pZip->m_pState) ||
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
archive_name_size = strlen(pArchive_name);
if (archive_name_size > 0xFFFF) return MZ_FALSE;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
comment_size + archive_name_size) > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
return MZ_FALSE;
pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
if (!pSrc_file) return MZ_FALSE;
MZ_FSEEK64(pSrc_file, 0, SEEK_END);
uncomp_size = MZ_FTELL64(pSrc_file);
MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
if (uncomp_size > 0xFFFFFFFF) {
// No zip64 support yet
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (uncomp_size <= 3) level = 0;
if (!mz_zip_writer_write_zeros(
pZip, cur_archive_file_ofs,
num_alignment_padding_bytes + sizeof(local_dir_header))) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
local_dir_header_ofs += num_alignment_padding_bytes;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
cur_archive_file_ofs +=
num_alignment_padding_bytes + sizeof(local_dir_header);
MZ_CLEAR_OBJ(local_dir_header);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
archive_name_size) != archive_name_size) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
cur_archive_file_ofs += archive_name_size;
if (uncomp_size) {
mz_uint64 uncomp_remaining = uncomp_size;
void *pRead_buf =
pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
if (!pRead_buf) {
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
if (!level) {
while (uncomp_remaining) {
mz_uint n =
(mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
(pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
n) != n)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
uncomp_crc32 =
(mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
uncomp_remaining -= n;
cur_archive_file_ofs += n;
}
comp_size = uncomp_size;
} else {
mz_bool result = MZ_FALSE;
mz_zip_writer_add_state state;
tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
if (!pComp) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
state.m_pZip = pZip;
state.m_cur_archive_file_ofs = cur_archive_file_ofs;
state.m_comp_size = 0;
if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
tdefl_create_comp_flags_from_zip_params(
level, -15, MZ_DEFAULT_STRATEGY)) !=
TDEFL_STATUS_OKAY) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
for (;;) {
size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
(mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
tdefl_status status;
if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
break;
uncomp_crc32 = (mz_uint32)mz_crc32(
uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
uncomp_remaining -= in_buf_size;
status = tdefl_compress_buffer(
pComp, pRead_buf, in_buf_size,
uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
if (status == TDEFL_STATUS_DONE) {
result = MZ_TRUE;
break;
} else if (status != TDEFL_STATUS_OKAY)
break;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
if (!result) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
MZ_FCLOSE(pSrc_file);
return MZ_FALSE;
}
comp_size = state.m_comp_size;
cur_archive_file_ofs = state.m_cur_archive_file_ofs;
method = MZ_DEFLATED;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
}
MZ_FCLOSE(pSrc_file);
pSrc_file = NULL;
// no zip64 support yet
if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
return MZ_FALSE;
if (!mz_zip_writer_create_local_dir_header(
pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
return MZ_FALSE;
if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
sizeof(local_dir_header)) != sizeof(local_dir_header))
return MZ_FALSE;
if (!mz_zip_writer_add_to_central_dir(
pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
dos_time, dos_date, local_dir_header_ofs, ext_attributes))
return MZ_FALSE;
pZip->m_total_files++;
pZip->m_archive_size = cur_archive_file_ofs;
return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index) {
mz_uint n, bit_flags, num_alignment_padding_bytes;
mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
mz_uint32
local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
sizeof(mz_uint32)];
mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
size_t orig_central_dir_size;
mz_zip_internal_state *pState;
void *pBuf;
const mz_uint8 *pSrc_central_header;
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
if (NULL ==
(pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
return MZ_FALSE;
pState = pZip->m_pState;
num_alignment_padding_bytes =
mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
// no zip64 support yet
if ((pZip->m_total_files == 0xFFFF) ||
((pZip->m_archive_size + num_alignment_padding_bytes +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
0xFFFFFFFF))
return MZ_FALSE;
cur_src_file_ofs =
MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
cur_dst_file_ofs = pZip->m_archive_size;
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
return MZ_FALSE;
cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
num_alignment_padding_bytes))
return MZ_FALSE;
cur_dst_file_ofs += num_alignment_padding_bytes;
local_dir_header_ofs = cur_dst_file_ofs;
if (pZip->m_file_offset_alignment) {
MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
0);
}
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
comp_bytes_remaining =
n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
if (NULL == (pBuf = pZip->m_pAlloc(
pZip->m_pAlloc_opaque, 1,
(size_t)MZ_MAX(sizeof(mz_uint32) * 4,
MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
comp_bytes_remaining)))))
return MZ_FALSE;
while (comp_bytes_remaining) {
n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_dst_file_ofs += n;
comp_bytes_remaining -= n;
}
bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
if (bit_flags & 8) {
// Copy data descriptor
if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
return MZ_FALSE;
}
cur_src_file_ofs += n;
cur_dst_file_ofs += n;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
// no zip64 support yet
if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
orig_central_dir_size = pState->m_central_dir.m_size;
memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
local_dir_header_ofs);
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
return MZ_FALSE;
n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
if (!mz_zip_array_push_back(
pZip, &pState->m_central_dir,
pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
n = (mz_uint32)orig_central_dir_size;
if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
MZ_FALSE);
return MZ_FALSE;
}
pZip->m_total_files++;
pZip->m_archive_size = cur_dst_file_ofs;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_uint64 central_dir_ofs, central_dir_size;
mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
return MZ_FALSE;
pState = pZip->m_pState;
// no zip64 support yet
if ((pZip->m_total_files > 0xFFFF) ||
((pZip->m_archive_size + pState->m_central_dir.m_size +
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
return MZ_FALSE;
central_dir_ofs = 0;
central_dir_size = 0;
if (pZip->m_total_files) {
// Write central directory
central_dir_ofs = pZip->m_archive_size;
central_dir_size = pState->m_central_dir.m_size;
pZip->m_central_directory_file_ofs = central_dir_ofs;
if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
pState->m_central_dir.m_p,
(size_t)central_dir_size) != central_dir_size)
return MZ_FALSE;
pZip->m_archive_size += central_dir_size;
}
// Write end of central directory record
MZ_CLEAR_OBJ(hdr);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
pZip->m_total_files);
MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
sizeof(hdr)) != sizeof(hdr))
return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
pZip->m_archive_size += sizeof(hdr);
pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
return MZ_TRUE;
}
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize) {
if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
*pBuf = pZip->m_pState->m_pMem;
*pSize = pZip->m_pState->m_mem_size;
pZip->m_pState->m_pMem = NULL;
pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;
return MZ_TRUE;
}
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
mz_zip_internal_state *pState;
mz_bool status = MZ_TRUE;
if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
(pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
return MZ_FALSE;
pState = pZip->m_pState;
pZip->m_pState = NULL;
mz_zip_array_clear(pZip, &pState->m_central_dir);
mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
if (pState->m_pFile) {
MZ_FCLOSE(pState->m_pFile);
pState->m_pFile = NULL;
}
#endif // #ifndef MINIZ_NO_STDIO
if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
pState->m_pMem = NULL;
}
pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
return status;
}
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags) {
mz_bool status, created_new_archive = MZ_FALSE;
mz_zip_archive zip_archive;
struct MZ_FILE_STAT_STRUCT file_stat;
MZ_CLEAR_OBJ(zip_archive);
if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
((comment_size) && (!pComment)) ||
((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
return MZ_FALSE;
if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
// Create a new archive.
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
return MZ_FALSE;
created_new_archive = MZ_TRUE;
} else {
// Append to an existing archive.
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return MZ_FALSE;
if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
mz_zip_reader_end(&zip_archive);
return MZ_FALSE;
}
}
status =
mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
pComment, comment_size, level_and_flags, 0, 0);
// Always finalize, even if adding failed for some reason, so we have a valid
// central directory. (This may not always succeed, but we can try.)
if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
if ((!status) && (created_new_archive)) {
// It's a new archive and something went wrong, so just delete it.
int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
(void)ignoredStatus;
}
return status;
}
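// Usage sketch (illustrative; paths and names are hypothetical): the one-call
// convenience path above creates "notes.zip" if it doesn't exist, otherwise
// appends to it.
//
//   const char *text = "some note";
//   mz_zip_add_mem_to_archive_file_in_place("notes.zip", "note1.txt", text,
//                                           strlen(text), NULL, 0,
//                                           MZ_DEFAULT_LEVEL);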
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
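// Usage sketch (illustrative; paths are hypothetical): pulling one entry into
// a heap buffer. With the default allocators the block comes from malloc(),
// so free() releases it; with custom callbacks, use the matching free
// function.
//
//   size_t size = 0;
//   void *p = mz_zip_extract_archive_file_to_heap("archive.zip", "doc.txt",
//                                                 &size, 0);
//   if (p) {
//     // ... consume size bytes at p ...
//     free(p);
//   }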
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
}
#else
// Reuse the MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
static const int kEXRVersionSize = 8;
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned short tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[1];
dst[1] = src[0];
#endif
}
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
unsigned int tmp = *val;
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
#endif
}
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
(void)val;
#else
tinyexr::tinyexr_uint64 tmp = (*val);
unsigned char *dst = reinterpret_cast<unsigned char *>(val);
unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
#endif
}
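// Worked example: on a big-endian target, swap4() reverses the byte order of
// a value read from the little-endian file format, e.g. 0x01020304 becomes
// 0x04030201. On little-endian builds all three helpers are no-ops.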
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
union FP32 {
unsigned int u;
float f;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 23;
unsigned int Exponent : 8;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 8;
unsigned int Mantissa : 23;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
union FP16 {
unsigned short u;
struct {
#if MINIZ_LITTLE_ENDIAN
unsigned int Mantissa : 10;
unsigned int Exponent : 5;
unsigned int Sign : 1;
#else
unsigned int Sign : 1;
unsigned int Exponent : 5;
unsigned int Mantissa : 10;
#endif
} s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
static FP32 half_to_float(FP16 h) {
static const FP32 magic = {113 << 23};
static const unsigned int shifted_exp = 0x7c00
<< 13; // exponent mask after shift
FP32 o;
o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits
unsigned int exp_ = shifted_exp & o.u; // just the exponent
o.u += (127 - 15) << 23; // exponent adjust
// handle exponent special cases
if (exp_ == shifted_exp) // Inf/NaN?
o.u += (128 - 16) << 23; // extra exp adjust
else if (exp_ == 0) // Zero/Denormal?
{
o.u += 1 << 23; // extra exp adjust
o.f -= magic.f; // renormalize
}
o.u |= (h.u & 0x8000U) << 16U; // sign bit
return o;
}
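// Worked example for half_to_float(): the half bit pattern 0x3C00 (sign 0,
// exponent 15, mantissa 0, i.e. 1.0) expands as
//   o.u  = (0x3C00 & 0x7fff) << 13  = 0x07800000
//   exp_ = shifted_exp & o.u        = 0x07800000  (neither Inf/NaN nor zero)
//   o.u += (127 - 15) << 23         -> 0x3F800000, the bit pattern of 1.0f
// and the sign bit contributes nothing since it is 0.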
static FP16 float_to_half_full(FP32 f) {
FP16 o = {0};
// Based on ISPC reference code (with minor modifications)
if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
o.s.Exponent = 0;
else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
{
o.s.Exponent = 31;
o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
} else // Normalized number
{
    // Unbias the single-precision exponent, then re-bias it for half precision
int newexp = f.s.Exponent - 127 + 15;
if (newexp >= 31) // Overflow, return signed infinity
o.s.Exponent = 31;
else if (newexp <= 0) // Underflow
{
if ((14 - newexp) <= 24) // Mantissa might be non-zero
{
unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
o.s.Mantissa = mant >> (14 - newexp);
if ((mant >> (13 - newexp)) & 1) // Check for rounding
o.u++; // Round, might overflow into exp bit, but this is OK
}
} else {
o.s.Exponent = static_cast<unsigned int>(newexp);
o.s.Mantissa = f.s.Mantissa >> 13;
if (f.s.Mantissa & 0x1000) // Check for rounding
o.u++; // Round, might overflow to inf, this is OK
}
}
o.s.Sign = f.s.Sign;
return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NUL ('\0') or the end of the buffer.
const char *p = ptr;
const char *q = ptr;
while ((size_t(q - ptr) < len) && (*q) != 0) {
q++;
}
if (size_t(q - ptr) >= len) {
(*s) = std::string();
return NULL;
}
(*s) = std::string(p, q);
return q + 1; // skip '\0'
}
static bool ReadAttribute(std::string *name, std::string *type,
std::vector<unsigned char> *data, size_t *marker_size,
const char *marker, size_t size) {
size_t name_len = strnlen(marker, size);
if (name_len == size) {
// String does not have a terminating character.
return false;
}
*name = std::string(marker, name_len);
marker += name_len + 1;
size -= name_len + 1;
size_t type_len = strnlen(marker, size);
if (type_len == size) {
return false;
}
*type = std::string(marker, type_len);
marker += type_len + 1;
size -= type_len + 1;
if (size < sizeof(uint32_t)) {
return false;
}
uint32_t data_len;
memcpy(&data_len, marker, sizeof(uint32_t));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len == 0) {
return false;
}
marker += sizeof(uint32_t);
size -= sizeof(uint32_t);
if (size < data_len) {
return false;
}
data->resize(static_cast<size_t>(data_len));
memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
*marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
return true;
}
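// For reference, the attribute wire layout parsed above is: NUL-terminated
// name, NUL-terminated type string, little-endian uint32 payload size, then
// the payload bytes. For example, a "compression" attribute (type
// "compression", 1-byte payload) storing ZIP compression looks like:
//
//   63 6F 6D 70 72 65 73 73 69 6F 6E 00   "compression\0"  (name)
//   63 6F 6D 70 72 65 73 73 69 6F 6E 00   "compression\0"  (type)
//   01 00 00 00                           payload size = 1
//   03                                    IMF_ZIP_COMPRESSION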
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
const char *name, const char *type,
const unsigned char *data, int len) {
out->insert(out->end(), name, name + strlen(name) + 1);
out->insert(out->end(), type, type + strlen(type) + 1);
int outLen = len;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
out->insert(out->end(), data, data + len);
}
typedef struct {
std::string name; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} ChannelInfo;
typedef struct {
std::vector<tinyexr::ChannelInfo> channels;
std::vector<EXRAttribute> attributes;
int data_window[4];
int line_order;
int display_window[4];
float screen_window_center[2];
float screen_window_width;
float pixel_aspect_ratio;
int chunk_count;
// Tiled format
int tile_size_x;
int tile_size_y;
int tile_level_mode;
int tile_rounding_mode;
unsigned int header_len;
int compression_type;
void clear() {
channels.clear();
attributes.clear();
data_window[0] = 0;
data_window[1] = 0;
data_window[2] = 0;
data_window[3] = 0;
line_order = 0;
display_window[0] = 0;
display_window[1] = 0;
display_window[2] = 0;
display_window[3] = 0;
screen_window_center[0] = 0.0f;
screen_window_center[1] = 0.0f;
screen_window_width = 0.0f;
pixel_aspect_ratio = 0.0f;
chunk_count = 0;
// Tiled format
tile_size_x = 0;
tile_size_y = 0;
tile_level_mode = 0;
tile_rounding_mode = 0;
header_len = 0;
compression_type = 0;
}
} HeaderInfo;
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
const std::vector<unsigned char> &data) {
const char *p = reinterpret_cast<const char *>(&data.at(0));
for (;;) {
if ((*p) == 0) {
break;
}
ChannelInfo info;
tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
(p - reinterpret_cast<const char *>(data.data()));
if (data_len < 0) {
return false;
}
p = ReadString(&info.name, p, size_t(data_len));
if ((p == NULL) && (info.name.empty())) {
// Buffer overrun. Issue #51.
return false;
}
memcpy(&info.pixel_type, p, sizeof(int));
p += 4;
info.p_linear = static_cast<unsigned char>(p[0]); // uchar
p += 1 + 3; // reserved: uchar[3]
memcpy(&info.x_sampling, p, sizeof(int)); // int
p += 4;
memcpy(&info.y_sampling, p, sizeof(int)); // int
p += 4;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));
channels.push_back(info);
}
return true;
}
static void WriteChannelInfo(std::vector<unsigned char> &data,
const std::vector<ChannelInfo> &channels) {
size_t sz = 0;
// Calculate total size.
for (size_t c = 0; c < channels.size(); c++) {
sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
sz += 16; // 4 * int
}
data.resize(sz + 1);
unsigned char *p = &data.at(0);
for (size_t c = 0; c < channels.size(); c++) {
memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
p += strlen(channels[c].name.c_str());
(*p) = '\0';
p++;
int pixel_type = channels[c].pixel_type;
int x_sampling = channels[c].x_sampling;
int y_sampling = channels[c].y_sampling;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));
memcpy(p, &pixel_type, sizeof(int));
p += sizeof(int);
(*p) = channels[c].p_linear;
p += 4;
memcpy(p, &x_sampling, sizeof(int));
p += sizeof(int);
memcpy(p, &y_sampling, sizeof(int));
p += sizeof(int);
}
(*p) = '\0';
}
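// For reference, each entry in the channel-list payload handled above is:
// NUL-terminated channel name, pixel_type (int32: 0 = UINT, 1 = HALF,
// 2 = FLOAT), pLinear (uint8) plus three reserved bytes, then xSampling and
// ySampling (int32 each); the list is terminated by a single empty name
// (a lone NUL byte).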
static void CompressZip(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocess (pixel-data reorder + delta predictor)
  // before deflating. Adapted from OpenEXR's ImfZipCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
#if TINYEXR_USE_MINIZ
//
// Compress the data using miniz
//
miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
int ret = miniz::mz_compress(
dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
src_size);
assert(ret == miniz::MZ_OK);
(void)ret;
compressedSize = outSize;
#else
uLong outSize = compressBound(static_cast<uLong>(src_size));
int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
src_size);
assert(ret == Z_OK);
compressedSize = outSize;
#endif
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static bool DecompressZip(unsigned char *dst,
unsigned long *uncompressed_size /* inout */,
const unsigned char *src, unsigned long src_size) {
if ((*uncompressed_size) == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return true;
}
std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
int ret =
miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (miniz::MZ_OK != ret) {
return false;
}
#else
int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
if (Z_OK != ret) {
return false;
}
#endif
//
  // Apply the EXR-specific postprocess (undo the delta predictor, then
  // restore the pixel-data order). Adapted from OpenEXR's ImfZipCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(*uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + (*uncompressed_size);
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
return true;
}
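// Worked example of the predictor pair above: encoding the bytes {10, 12, 11}
// keeps t[0] = 10 and stores d = t[i] - t[i-1] + 384 (taken mod 256):
//   12 - 10 + 384 = 386 -> 130,   11 - 12 + 384 = 383 -> 127
// Decoding inverts it with t[i] = t[i-1] + t[i] - 128:
//   10 + 130 - 128 = 12,          12 + 127 - 128 = 11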
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning( \
disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
const char *inEnd = in + inLength;
const char *runStart = in;
const char *runEnd = in + 1;
signed char *outWrite = out;
while (runStart < inEnd) {
while (runEnd < inEnd && *runStart == *runEnd &&
runEnd - runStart - 1 < MAX_RUN_LENGTH) {
++runEnd;
}
if (runEnd - runStart >= MIN_RUN_LENGTH) {
//
      // Compressible run
//
*outWrite++ = static_cast<char>(runEnd - runStart) - 1;
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
runStart = runEnd;
} else {
//
      // Uncompressible (literal) run
//
while (runEnd < inEnd &&
((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
(runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
runEnd - runStart < MAX_RUN_LENGTH) {
++runEnd;
}
*outWrite++ = static_cast<char>(runStart - runEnd);
while (runStart < runEnd) {
*outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
}
}
++runEnd;
}
return static_cast<int>(outWrite - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// uncompressed data would be longer than maxLength.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
if (0 > (maxLength -= count)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
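// Worked example of the RLE scheme above: a run of at least MIN_RUN_LENGTH
// equal bytes becomes a positive count byte (run length - 1) followed by the
// value; shorter stretches become a negative count byte (minus the literal
// count) followed by the literals verbatim. rleCompress() therefore encodes
// "AAAABC" as { 3, 'A', -2, 'B', 'C' }, and rleUncompress() expands 3 into
// four 'A's before copying the two literals back out.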
static void CompressRle(unsigned char *dst,
tinyexr::tinyexr_uint64 &compressedSize,
const unsigned char *src, unsigned long src_size) {
std::vector<unsigned char> tmpBuf(src_size);
//
  // Apply the EXR-specific preprocess (pixel-data reorder + delta predictor)
  // before run-length encoding. Adapted from OpenEXR's ImfRleCompressor.cpp.
//
//
// Reorder the pixel data.
//
const char *srcPtr = reinterpret_cast<const char *>(src);
{
char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
const char *stop = srcPtr + src_size;
for (;;) {
if (srcPtr < stop)
*(t1++) = *(srcPtr++);
else
break;
if (srcPtr < stop)
*(t2++) = *(srcPtr++);
else
break;
}
}
//
// Predictor.
//
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + src_size;
int p = t[-1];
while (t < stop) {
int d = int(t[0]) - p + (128 + 256);
p = t[0];
t[0] = static_cast<unsigned char>(d);
++t;
}
}
  // outSize will be (src_size * 3) / 2 at most.
int outSize = rleCompress(static_cast<int>(src_size),
reinterpret_cast<const char *>(&tmpBuf.at(0)),
reinterpret_cast<signed char *>(dst));
assert(outSize > 0);
compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
// Use uncompressed data when compressed data is larger than uncompressed.
// (Issue 40)
if (compressedSize >= src_size) {
compressedSize = src_size;
memcpy(dst, src, src_size);
}
}
static void DecompressRle(unsigned char *dst,
const unsigned long uncompressed_size,
const unsigned char *src, unsigned long src_size) {
if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
memcpy(dst, src, src_size);
return;
}
std::vector<unsigned char> tmpBuf(uncompressed_size);
int ret = rleUncompress(static_cast<int>(src_size),
static_cast<int>(uncompressed_size),
reinterpret_cast<const signed char *>(src),
reinterpret_cast<char *>(&tmpBuf.at(0)));
assert(ret == static_cast<int>(uncompressed_size));
(void)ret;
//
  // Apply the EXR-specific postprocess (undo the delta predictor, then
  // restore the pixel-data order). Adapted from OpenEXR's ImfRleCompressor.cpp.
//
// Predictor.
{
unsigned char *t = &tmpBuf.at(0) + 1;
unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
while (t < stop) {
int d = int(t[-1]) + int(t[0]) - 128;
t[0] = static_cast<unsigned char>(d);
++t;
}
}
// Reorder the pixel data.
{
const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
(uncompressed_size + 1) / 2;
char *s = reinterpret_cast<char *>(dst);
char *stop = s + uncompressed_size;
for (;;) {
if (s < stop)
*(s++) = *(t1++);
else
break;
if (s < stop)
*(s++) = *(t2++);
else
break;
}
}
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
struct PIZChannelData {
unsigned short *start;
unsigned short *end;
int nx;
int ny;
int ys;
int size;
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = static_cast<short>(a);
short bs = static_cast<short>(b);
short ms = (as + bs) >> 1;
short ds = as - bs;
l = static_cast<unsigned short>(ms);
h = static_cast<unsigned short>(ds);
}
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
short ls = static_cast<short>(l);
short hs = static_cast<short>(h);
int hi = hs;
int ai = ls + (hi & 1) + (hi >> 1);
short as = static_cast<short>(ai);
short bs = static_cast<short>(ai - hi);
a = static_cast<unsigned short>(as);
b = static_cast<unsigned short>(bs);
}
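// Worked example for the 14-bit pair above: wenc14(10, 4) stores the average
// l = (10 + 4) >> 1 = 7 and the difference h = 10 - 4 = 6; wdec14(7, 6) then
// recovers a = 7 + (6 & 1) + (6 >> 1) = 10 and b = 10 - 6 = 4, so the
// transform is exactly invertible despite the truncating shift.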
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
}
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
unsigned short &b) {
int m = l;
int d = h;
int bb = (m - (d >> 1)) & MOD_MASK;
int aa = (d + bb - A_OFFSET) & MOD_MASK;
b = static_cast<unsigned short>(bb);
a = static_cast<unsigned short>(aa);
}
//
// 2D Wavelet encoding:
//
static void wav2Encode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
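//
// Example of the level progression: for n = min(nx, ny) = 8 the loop above
// runs with (p, p2) = (1, 2), (2, 4) and (4, 8), i.e. three transform
// levels, each operating on a grid twice as coarse as the previous one.
//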
//
// 2D Wavelet decoding:
//
static void wav2Decode(
unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1;
int p2;
//
// Search max level
//
while (p <= n) p <<= 1;
p >>= 1;
p2 = p;
p >>= 1;
//
// Hierarchical loop on smaller dimension n
//
while (p >= 1) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet decoding
//
if (w14) {
wdec14(*px, *p10, i00, i10);
wdec14(*p01, *p11, i01, i11);
wdec14(i00, i01, *px, *p01);
wdec14(i10, i11, *p10, *p11);
} else {
wdec16(*px, *p10, i00, i10);
wdec16(*p01, *p11, i01, i11);
wdec16(i00, i01, *px, *p01);
wdec16(i10, i11, *p10, *p11);
}
}
//
// Decode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wdec14(*px, *p10, i00, *p10);
else
wdec16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Decode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wdec14(*px, *p01, i00, *p01);
else
wdec16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p2 = p;
p >>= 1;
}
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Contains some modifications for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length
const int HUF_DECBITS = 14; // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;
struct HufDec {
  // Field usage differs between short codes (len <= HUF_DECBITS)
  // and long codes:
  //
  //               short code    long code
  //               -------------------------
  int len : 8;   // code length  0
  int lit : 24;  // lit          p size
  int *p;        // 0            lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
while (lc < nBits) {
c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
lc += 8;
}
lc -= nBits;
return (c >> lc) & ((1 << nBits) - 1);
}
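//
// Note: outputBits() and getBits() implement an MSB-first bit buffer.
// For example, outputBits(6, 0x2a, ...) followed by outputBits(6, 0x15, ...)
// accumulates 12 bits in `c`, flushes the top 8 bits (0xa9) as one byte,
// and leaves lc == 4 bits pending in `c`.
//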
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
long long n[59];
//
// For each i from 0 through 58, count the
// number of different codes of length i, and
// store the count in n[i].
//
for (int i = 0; i <= 58; ++i) n[i] = 0;
for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
//
// For each i from 58 through 1, compute the
// numerically lowest code with length i, and
// store that code in n[i].
//
long long c = 0;
for (int i = 58; i > 0; --i) {
long long nc = ((c + n[i]) >> 1);
n[i] = c;
c = nc;
}
//
// hcode[i] contains the length, l, of the
// code for symbol i. Assign the next available
// code of length l to the symbol and store both
// l and the code in hcode[i].
//
for (int i = 0; i < HUF_ENCSIZE; ++i) {
int l = static_cast<int>(hcode[i]);
if (l > 0) hcode[i] = l | (n[l]++ << 6);
}
}
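//
// Worked example: for code lengths {1, 2, 2} the two passes above yield
// n[1] = 1 and n[2] = 0, so the symbols receive the canonical codes
// "1", "00" and "01". Note that the short code "1", padded with zeroes
// to "10", is numerically above both 2-bit codes, as required.
//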
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
static void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
int hlink[HUF_ENCSIZE];
long long *fHeap[HUF_ENCSIZE];
*im = 0;
while (!frq[*im]) (*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
long long scode[HUF_ENCSIZE];
memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm;; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode);
memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
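//
// Packing examples: a run of 3 zero-length codes is stored as the single
// 6-bit value 60; a run of 10 is stored as 63 followed by 10 - 6 = 4 in
// 8 bits, since SHORTEST_LONG_RUN == 6.
//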
static void hufPackEncTable(
const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0) break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
  if (lc > 0) *p++ = static_cast<char>(c << (8 - lc));
*pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
//
static bool hufUnpackEncTable(
const char **pcode, // io: ptr to packed table (updated)
int ni, // i : input size (in bytes)
int im, // i : min hcode index
int iM, // i : max hcode index
long long *hcode) // o: encoding table [HUF_ENCSIZE]
{
memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
const char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
if (p - *pcode > ni) {
return false;
}
long long l = hcode[im] = getBits(6, c, lc, p); // code length
if (l == (long long)LONG_ZEROCODE_RUN) {
if (p - *pcode > ni) {
return false;
}
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
} else if (l >= (long long)SHORT_ZEROCODE_RUN) {
int zerun = l - SHORT_ZEROCODE_RUN + 2;
if (im + zerun > iM + 1) {
return false;
}
while (zerun--) hcode[im++] = 0;
im--;
}
}
*pcode = const_cast<char *>(p);
hufCanonicalCodeTable(hcode);
return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
for (int i = 0; i < HUF_DECSIZE; i++) {
hdecod[i].len = 0;
hdecod[i].lit = 0;
hdecod[i].p = NULL;
}
// memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   infrequent;
// - decoding tables are used by hufDecode();
//
static bool hufBuildDecTable(const long long *hcode, // i : encoding table
int im, // i : min index in hcode
int iM, // i : max index in hcode
HufDec *hdecod) // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
//
// Init hashtable & loop on all codes.
// Assumes that hufClearDecTable(hdecod) has already been called.
//
for (; im <= iM; im++) {
long long c = hufCode(hcode[im]);
int l = hufLength(hcode[im]);
if (c >> l) {
//
// Error: c is supposed to be an l-bit code,
// but c contains a value that is greater
// than the largest l-bit number.
//
// invalidTableEntry();
return false;
}
if (l > HUF_DECBITS) {
//
// Long code: add a secondary entry
//
HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
if (pl->len) {
//
// Error: a short code has already
// been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->lit++;
if (pl->p) {
int *p = pl->p;
pl->p = new int[pl->lit];
for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];
delete[] p;
} else {
pl->p = new int[1];
}
pl->p[pl->lit - 1] = im;
} else if (l) {
//
// Short code: init all primary entries
//
HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
if (pl->len || pl->p) {
//
// Error: a short code or a long code has
// already been stored in table entry *pl.
//
// invalidTableEntry();
return false;
}
pl->len = l;
pl->lit = im;
}
}
}
return true;
}
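//
// Example of the primary-table fan-out: a 3-bit code c occupies the
// 1 << (HUF_DECBITS - 3) == 2048 consecutive entries starting at
// c << 11, so hufDecode() can peek HUF_DECBITS bits and resolve any
// short code with a single table access.
//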
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
for (int i = 0; i < HUF_DECSIZE; i++) {
if (hdecod[i].p) {
delete[] hdecod[i].p;
hdecod[i].p = 0;
}
}
}
//
// ENCODING
//
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
long long &c, int &lc, char *&out) {
//
  // Output a run of runCount instances of the symbol sCode.
// Output the symbols explicitly, or if that is shorter, output
// the sCode symbol once followed by a runCode symbol and runCount
// expressed as an 8-bit number.
//
if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
outputCode(sCode, c, lc, out);
outputCode(runCode, c, lc, out);
outputBits(8, runCount, c, lc, out);
} else {
while (runCount-- >= 0) outputCode(sCode, c, lc, out);
}
}
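//
// Cost example: with a 6-bit code for the symbol and a 7-bit run-length
// code, a run of runCount == 40 repeats costs 6 + 7 + 8 = 21 bits in
// run-length form, far below the ~240 bits of emitting each repeat
// explicitly, so the first branch above is taken.
//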
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
static int hufEncode // return: output size (in bits)
(const long long *hcode, // i : encoding table
const unsigned short *in, // i : uncompressed input buffer
const int ni, // i : input buffer size (in bytes)
int rlc, // i : rl code
char *out) // o: compressed output buffer
{
char *outStart = out;
long long c = 0; // bits not yet written to out
int lc = 0; // number of valid bits in c (LSB)
int s = in[0];
int cs = 0;
//
// Loop on input values
//
for (int i = 1; i < ni; i++) {
//
// Count same values or send code
//
if (s == in[i] && cs < 255) {
cs++;
} else {
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
cs = 0;
}
s = in[i];
}
//
// Send remaining code
//
sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
if (lc) *out = (c << (8 - lc)) & 0xff;
return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
#define getChar(c, lc, in) \
{ \
c = (c << 8) | *(unsigned char *)(in++); \
lc += 8; \
}
#define getCode(po, rlc, c, lc, in, out, ob, oe)                     \
  {                                                                  \
    if (po == rlc) {                                                 \
      if (lc < 8) getChar(c, lc, in);                                \
                                                                     \
      lc -= 8;                                                       \
                                                                     \
      unsigned char cs = (c >> lc);                                  \
                                                                     \
      if (out + cs > oe) return false;                               \
                                                                     \
      /* The run-length code copies the previous output symbol; */   \
      /* at least one symbol must have been emitted already.    */   \
      if (out == ob) return false;                                   \
                                                                     \
      unsigned short s = out[-1];                                    \
                                                                     \
      while (cs-- > 0) *out++ = s;                                   \
    } else if (out < oe) {                                           \
      *out++ = po;                                                   \
    } else {                                                         \
      return false;                                                  \
    }                                                                \
  }
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
static bool hufDecode(const long long *hcode, // i : encoding table
const HufDec *hdecod, // i : decoding table
const char *in, // i : compressed input buffer
int ni, // i : input size (in bits)
int rlc, // i : run-length code
int no, // i : expected output size (in bytes)
unsigned short *out) // o: uncompressed output buffer
{
long long c = 0;
int lc = 0;
unsigned short *outb = out;
unsigned short *oe = out + no;
const char *ie = in + (ni + 7) / 8; // input byte size
//
// Loop on input bytes
//
while (in < ie) {
getChar(c, lc, in);
//
// Access decoding table
//
while (lc >= HUF_DECBITS) {
const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
if (pl.len) {
//
// Get short code
//
lc -= pl.len;
        getCode(pl.lit, rlc, c, lc, in, out, outb, oe);
} else {
if (!pl.p) {
return false;
}
// invalidCode(); // wrong code
//
// Search long code
//
int j;
for (j = 0; j < pl.lit; j++) {
int l = hufLength(hcode[pl.p[j]]);
while (lc < l && in < ie) // get more bits
getChar(c, lc, in);
if (lc >= l) {
if (hufCode(hcode[pl.p[j]]) ==
((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
//
// Found : get long code
//
lc -= l;
              getCode(pl.p[j], rlc, c, lc, in, out, outb, oe);
break;
}
}
}
if (j == pl.lit) {
return false;
// invalidCode(); // Not found
}
}
}
}
//
// Get remaining (short) codes
//
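  // ni is the total bit count of the input; (8 - ni) & 7 is the number of
  // zero-padding bits in the final byte, which must be discarded first.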
int i = (8 - ni) & 7;
c >>= i;
lc -= i;
while (lc > 0) {
const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
if (pl.len) {
lc -= pl.len;
      getCode(pl.lit, rlc, c, lc, in, out, outb, oe);
} else {
return false;
// invalidCode(); // wrong (long) code
}
}
if (out - outb != no) {
return false;
}
// notEnoughData ();
return true;
}
static void countFrequencies(long long freq[HUF_ENCSIZE],
const unsigned short data[/*n*/], int n) {
for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;
for (int i = 0; i < n; ++i) ++freq[data[i]];
}
static void writeUInt(char buf[4], unsigned int i) {
unsigned char *b = (unsigned char *)buf;
b[0] = i;
b[1] = i >> 8;
b[2] = i >> 16;
b[3] = i >> 24;
}
static unsigned int readUInt(const char buf[4]) {
const unsigned char *b = (const unsigned char *)buf;
return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}
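//
// Note: writeUInt()/readUInt() (de)serialize 32-bit values in little-endian
// byte order regardless of the host CPU; e.g. writeUInt(buf, 0x12345678)
// stores the bytes 0x78 0x56 0x34 0x12, and readUInt() round-trips them.
//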
//
// EXTERNAL INTERFACE
//
static int hufCompress(const unsigned short raw[], int nRaw,
char compressed[]) {
if (nRaw == 0) return 0;
long long freq[HUF_ENCSIZE];
countFrequencies(freq, raw, nRaw);
int im = 0;
int iM = 0;
hufBuildEncTable(freq, &im, &iM);
char *tableStart = compressed + 20;
char *tableEnd = tableStart;
hufPackEncTable(freq, im, iM, &tableEnd);
int tableLength = tableEnd - tableStart;
char *dataStart = tableEnd;
int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
int data_length = (nBits + 7) / 8;
writeUInt(compressed, im);
writeUInt(compressed + 4, iM);
writeUInt(compressed + 8, tableLength);
writeUInt(compressed + 12, nBits);
writeUInt(compressed + 16, 0); // room for future extensions
return dataStart + data_length - compressed;
}
static bool hufUncompress(const char compressed[], int nCompressed,
unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    // Nothing to decode; succeed only when no output is expected.
    if (nRaw != 0) return false;
    return true;
  }
int im = readUInt(compressed);
int iM = readUInt(compressed + 4);
// int tableLength = readUInt (compressed + 8);
int nBits = readUInt(compressed + 12);
if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
const char *ptr = compressed + 20;
//
// Fast decoder needs at least 2x64-bits of compressed data, and
// needs to be run-able on this platform. Otherwise, fall back
// to the original decoder
//
// if (FastHufDecoder::enabled() && nBits > 128)
//{
// FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
// fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
//}
// else
{
std::vector<long long> freq(HUF_ENCSIZE);
std::vector<HufDec> hdec(HUF_DECSIZE);
hufClearDecTable(&hdec.at(0));
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    {
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }
      if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
      if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw)) {
        hufFreeDecTable(&hdec.at(0));
        return false;
      }
    }
// catch (...)
//{
// hufFreeDecTable (hdec);
// throw;
//}
hufFreeDecTable(&hdec.at(0));
}
return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
unsigned char bitmap[BITMAP_SIZE],
unsigned short &minNonZero,
unsigned short &maxNonZero) {
for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;
for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
bitmap[0] &= ~1; // zero is not explicitly stored in
// the bitmap; we assume that the
// data always contain zeroes
minNonZero = BITMAP_SIZE - 1;
maxNonZero = 0;
for (int i = 0; i < BITMAP_SIZE; ++i) {
if (bitmap[i]) {
if (minNonZero > i) minNonZero = i;
if (maxNonZero < i) maxNonZero = i;
}
}
}
static unsigned short forwardLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
lut[i] = k++;
else
lut[i] = 0;
}
return k - 1; // maximum value stored in lut[],
} // i.e. number of ones in bitmap minus 1
static unsigned short reverseLutFromBitmap(
const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
int k = 0;
for (int i = 0; i < USHORT_RANGE; ++i) {
if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
}
int n = k - 1;
while (k < USHORT_RANGE) lut[k++] = 0;
return n; // maximum k where lut[k] is non-zero,
} // i.e. number of ones in bitmap minus 1
static void applyLut(const unsigned short lut[USHORT_RANGE],
unsigned short data[/*nData*/], int nData) {
for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}
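//
// Worked example: if the pixel data contain only the values {0, 7, 500},
// forwardLutFromBitmap() maps them to the dense indices 0, 1 and 2 and
// returns maxValue == 2, so the wavelet and Huffman stages only ever see
// values <= 2; reverseLutFromBitmap() builds the inverse mapping applied
// after decoding.
//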
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
const unsigned char *inPtr, size_t inSize,
const std::vector<ChannelInfo> &channelInfo,
int data_width, int num_lines) {
unsigned char bitmap[BITMAP_SIZE];
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
  // Assume `inSize` is a multiple of 2 or 4.
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
std::vector<PIZChannelData> channelData(channelInfo.size());
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t c = 0; c < channelData.size(); c++) {
PIZChannelData &cd = channelData[c];
cd.start = tmpBufferEnd;
cd.end = cd.start;
cd.nx = data_width;
cd.ny = num_lines;
// cd.ys = c.channel().ySampling;
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
cd.size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += cd.nx * cd.ny * cd.size;
}
const unsigned char *ptr = inPtr;
for (int y = 0; y < num_lines; ++y) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(cd.end, ptr, n * sizeof(unsigned short));
ptr += n * sizeof(unsigned short);
cd.end += n;
}
}
bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap,
minNonZero, maxNonZero);
unsigned short lut[USHORT_RANGE];
unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
//
// Store range compression info in _outBuffer
//
char *buf = reinterpret_cast<char *>(outPtr);
memcpy(buf, &minNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
memcpy(buf, &maxNonZero, sizeof(unsigned short));
buf += sizeof(unsigned short);
if (minNonZero <= maxNonZero) {
memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
maxNonZero - minNonZero + 1);
buf += maxNonZero - minNonZero + 1;
}
//
// Apply wavelet encoding
//
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Apply Huffman encoding; append the result to _outBuffer
//
  // Length header (4 bytes), then Huffman data. Initialize the length header
  // with zero, then fill it in with `length` afterwards.
char *lengthPtr = buf;
int zero = 0;
memcpy(buf, &zero, sizeof(int));
buf += sizeof(int);
int length =
hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
memcpy(lengthPtr, &length, sizeof(int));
(*outSize) = static_cast<unsigned int>(
(reinterpret_cast<unsigned char *>(buf) - outPtr) +
static_cast<unsigned int>(length));
  // Use the uncompressed data when the compressed result is larger
  // than the input (Issue 40).
if ((*outSize) >= inSize) {
(*outSize) = static_cast<unsigned int>(inSize);
memcpy(outPtr, inPtr, inSize);
}
return true;
}
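//
// Layout of a PIZ-compressed block as written by CompressPiz() above:
// minNonZero (2 bytes), maxNonZero (2 bytes), the bitmap slice
// [minNonZero, maxNonZero] (if non-empty), a 4-byte Huffman data length,
// then the Huffman-coded data. DecompressPiz() below parses exactly this.
//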
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
size_t tmpBufSize, size_t inLen, int num_channels,
const EXRChannelInfo *channels, int data_width,
int num_lines) {
if (inLen == tmpBufSize) {
    // Data is not compressed (Issue 40).
memcpy(outPtr, inPtr, inLen);
return true;
}
unsigned char bitmap[BITMAP_SIZE];
unsigned short minNonZero;
unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
// @todo { PIZ compression on BigEndian architecture. }
assert(0);
return false;
#endif
memset(bitmap, 0, BITMAP_SIZE);
const unsigned char *ptr = inPtr;
minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
ptr += 4;
if (maxNonZero >= BITMAP_SIZE) {
return false;
}
if (minNonZero <= maxNonZero) {
memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
maxNonZero - minNonZero + 1);
ptr += maxNonZero - minNonZero + 1;
}
unsigned short lut[USHORT_RANGE];
memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);
//
// Huffman decoding
//
int length;
length = *(reinterpret_cast<const int *>(ptr));
ptr += sizeof(int);
std::vector<unsigned short> tmpBuffer(tmpBufSize);
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer.at(0), static_cast<int>(tmpBufSize))) {
    return false;
  }
//
// Wavelet decoding
//
std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
const EXRChannelInfo &chan = channels[i];
size_t pixelSize = sizeof(int); // UINT and FLOAT
if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
pixelSize = sizeof(short);
}
channelData[i].start = tmpBufferEnd;
channelData[i].end = channelData[i].start;
channelData[i].nx = data_width;
channelData[i].ny = num_lines;
// channelData[i].ys = 1;
channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
}
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
for (int j = 0; j < cd.size; ++j) {
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
maxValue);
}
}
//
// Expand the pixel data to their original range
//
applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
for (int y = 0; y < num_lines; y++) {
for (size_t i = 0; i < channelData.size(); ++i) {
PIZChannelData &cd = channelData[i];
// if (modp (y, cd.ys) != 0)
// continue;
size_t n = static_cast<size_t>(cd.nx * cd.size);
memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
outPtr += n * sizeof(unsigned short);
cd.end += n;
}
}
return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
struct ZFPCompressionParam {
double rate;
int precision;
double tolerance;
int type; // TINYEXR_ZFP_COMPRESSIONTYPE_*
ZFPCompressionParam() {
type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
rate = 2.0;
precision = 0;
    tolerance = 0.0;
}
};
bool FindZFPCompressionParam(ZFPCompressionParam *param,
const EXRAttribute *attributes,
int num_attributes) {
bool foundType = false;
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
(attributes[i].size == 1)) {
param->type = static_cast<int>(attributes[i].value[0]);
foundType = true;
}
}
if (!foundType) {
return false;
}
if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
(attributes[i].size == 8)) {
param->rate = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
(attributes[i].size == 4)) {
        param->precision = *(reinterpret_cast<int *>(attributes[i].value));
return true;
}
}
} else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
for (int i = 0; i < num_attributes; i++) {
if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
(attributes[i].size == 8)) {
param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
return true;
}
}
} else {
assert(0);
}
return false;
}
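// Usage note: the parameters above travel as custom EXR attributes; e.g. a
// file carrying "zfpCompressionType" (1 byte) plus "zfpCompressionRate"
// (an 8-byte double) selects rate-based ZFP mode in the function above.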
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
int num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
  // Uncompressed size in bytes (all channels are assumed FLOAT).
  size_t uncompressed_size = static_cast<size_t>(dst_width) *
                             static_cast<size_t>(dst_num_lines) *
                             static_cast<size_t>(num_channels) * sizeof(float);
  if (uncompressed_size == src_size) {
    // Data is not compressed (Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((dst_width & 3U) || (dst_num_lines & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, dst_width, dst_num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = dst_width * dst_num_lines;
for (int c = 0; c < num_channels; c++) {
// decompress 4x4 pixel block.
for (int y = 0; y < dst_num_lines; y += 4) {
for (int x = 0; x < dst_width; x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * dst_width + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
const float *inPtr, int width, int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((width & 3U) || (num_lines & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, width, num_lines * num_channels);
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
} else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
} else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = width * num_lines;
for (int c = 0; c < num_channels; c++) {
// compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) {
for (int x = 0; x < width; x += 4) {
float fblock[16];
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * width + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
  zfp_stream_close(zfp);
  stream_close(stream);  // free the bitstream; zfp_stream_close() does not
return true;
}
#endif
//
// -----------------------------------------------------------------
//
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
    if (!tinyexr::DecompressPiz(
            reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr,
            tmpBufLen, data_len, static_cast<int>(num_channels), channels,
            width, num_lines)) {
      return false;
    }
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
          const float *line_ptr = reinterpret_cast<float *>(
              &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
                         channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)),
dstLen, data_ptr,
static_cast<unsigned long>(data_len));
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
} else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param, attributes,
num_attributes)) {
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
    unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
    if (!tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)),
                                width, num_lines,
                                static_cast<int>(num_channels), data_ptr,
                                static_cast<unsigned long>(data_len),
                                zfp_compression_param)) {
      return false;
    }
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
} else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
} else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
} else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr +
c * static_cast<size_t>(width) * sizeof(unsigned short));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
hf.u = line_ptr[u];
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
return false;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(float));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
float val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int));
unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += y * x_stride;
} else {
outLine += (height - 1 - y) * x_stride;
}
for (int u = 0; u < width; u++) {
unsigned int val = line_ptr[u];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
    } else {
      assert(0);
      return false;
    }
}
}
return true;
}
static void DecodeTiledPixelData(
unsigned char **out_images, int *width, int *height,
const int *requested_pixel_types, const unsigned char *data_ptr,
size_t data_len, int compression_type, int line_order, int data_width,
int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
int tile_size_y, size_t pixel_data_size, size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
assert(tile_offset_x * tile_size_x < data_width);
assert(tile_offset_y * tile_size_y < data_height);
// Compute actual image size in a tile.
if ((tile_offset_x + 1) * tile_size_x >= data_width) {
(*width) = data_width - (tile_offset_x * tile_size_x);
} else {
(*width) = tile_size_x;
}
if ((tile_offset_y + 1) * tile_size_y >= data_height) {
(*height) = data_height - (tile_offset_y * tile_size_y);
} else {
(*height) = tile_size_y;
}
// Image size = tile size.
DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
compression_type, line_order, (*width), tile_size_y,
/* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
(*height), pixel_data_size, num_attributes, attributes,
num_channels, channels, channel_offset_list);
}
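// Boundary example: with data_width == 21 and tile_size_x == 16, tile 0
// decodes 16 pixels wide while tile 1 is clamped to 21 - 16 == 5 pixels;
// the stride passed to DecodePixelData() stays at the full tile width.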
static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
int *pixel_data_size, size_t *channel_offset,
int num_channels,
const EXRChannelInfo *channels) {
channel_offset_list->resize(static_cast<size_t>(num_channels));
(*pixel_data_size) = 0;
(*channel_offset) = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
(*channel_offset_list)[c] = (*channel_offset);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
(*pixel_data_size) += sizeof(unsigned short);
(*channel_offset) += sizeof(unsigned short);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
(*pixel_data_size) += sizeof(float);
(*channel_offset) += sizeof(float);
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
(*pixel_data_size) += sizeof(unsigned int);
(*channel_offset) += sizeof(unsigned int);
} else {
assert(0);
}
}
}
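// Worked example: for the channel layout (HALF, FLOAT, UINT) the function
// above yields channel offsets 0, 2 and 6 within one pixel, and
// pixel_data_size == 10 bytes per pixel.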
static unsigned char **AllocateImage(int num_channels,
const EXRChannelInfo *channels,
const int *requested_pixel_types,
int data_width, int data_height) {
unsigned char **images =
reinterpret_cast<unsigned char **>(static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
size_t data_len =
static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
// pixel_data_size += sizeof(unsigned short);
// channel_offset += sizeof(unsigned short);
// Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
images[c] =
reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
malloc(sizeof(unsigned short) * data_len)));
} else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else {
assert(0);
}
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
// pixel_data_size += sizeof(float);
// channel_offset += sizeof(float);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<float *>(malloc(sizeof(float) * data_len)));
} else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
// pixel_data_size += sizeof(unsigned int);
// channel_offset += sizeof(unsigned int);
images[c] = reinterpret_cast<unsigned char *>(
static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
} else {
assert(0);
}
}
return images;
}
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
const EXRVersion *version, std::string *err,
const unsigned char *buf, size_t size) {
const char *marker = reinterpret_cast<const char *>(&buf[0]);
if (empty_header) {
(*empty_header) = false;
}
if (version->multipart) {
if (size > 0 && marker[0] == '\0') {
// End of header list.
if (empty_header) {
(*empty_header) = true;
}
return TINYEXR_SUCCESS;
}
}
// According to the spec, the header of every OpenEXR file must contain at
// least the following attributes:
//
// channels chlist
// compression compression
// dataWindow box2i
// displayWindow box2i
// lineOrder lineOrder
// pixelAspectRatio float
// screenWindowCenter v2f
// screenWindowWidth float
bool has_channels = false;
bool has_compression = false;
bool has_data_window = false;
bool has_display_window = false;
bool has_line_order = false;
bool has_pixel_aspect_ratio = false;
bool has_screen_window_center = false;
bool has_screen_window_width = false;
info->data_window[0] = 0;
info->data_window[1] = 0;
info->data_window[2] = 0;
info->data_window[3] = 0;
info->line_order = 0; // @fixme
info->display_window[0] = 0;
info->display_window[1] = 0;
info->display_window[2] = 0;
info->display_window[3] = 0;
info->screen_window_center[0] = 0.0f;
info->screen_window_center[1] = 0.0f;
info->screen_window_width = -1.0f;
info->pixel_aspect_ratio = -1.0f;
info->tile_size_x = -1;
info->tile_size_y = -1;
info->tile_level_mode = -1;
info->tile_rounding_mode = -1;
info->attributes.clear();
// Read attributes
size_t orig_size = size;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (version->tiled && attr_name.compare("tiles") == 0) {
unsigned int x_size, y_size;
unsigned char tile_mode;
      if (data.size() != 9) {
        if (err) {
          (*err) = "Invalid `tiles` attribute. Its size must be 9 bytes.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
memcpy(&x_size, &data.at(0), sizeof(int));
memcpy(&y_size, &data.at(4), sizeof(int));
tile_mode = data[8];
tinyexr::swap4(&x_size);
tinyexr::swap4(&y_size);
info->tile_size_x = static_cast<int>(x_size);
info->tile_size_y = static_cast<int>(y_size);
// mode = levelMode + roundingMode * 16
info->tile_level_mode = tile_mode & 0x3;
info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
} else if (attr_name.compare("compression") == 0) {
bool ok = false;
if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
ok = true;
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
ok = true;
#else
if (err) {
(*err) = "PIZ compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
ok = true;
#else
if (err) {
(*err) = "ZFP compression is not supported.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
}
if (!ok) {
if (err) {
(*err) = "Unknown compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
info->compression_type = static_cast<int>(data[0]);
has_compression = true;
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!ReadChannelInfo(info->channels, data)) {
if (err) {
(*err) = "Failed to parse channel info.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (info->channels.size() < 1) {
if (err) {
(*err) = "# of channels is zero.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
has_channels = true;
} else if (attr_name.compare("dataWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->data_window[0], &data.at(0), sizeof(int));
memcpy(&info->data_window[1], &data.at(4), sizeof(int));
memcpy(&info->data_window[2], &data.at(8), sizeof(int));
memcpy(&info->data_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3]));
has_data_window = true;
}
} else if (attr_name.compare("displayWindow") == 0) {
if (data.size() >= 16) {
memcpy(&info->display_window[0], &data.at(0), sizeof(int));
memcpy(&info->display_window[1], &data.at(4), sizeof(int));
memcpy(&info->display_window[2], &data.at(8), sizeof(int));
memcpy(&info->display_window[3], &data.at(12), sizeof(int));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[1]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[2]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->display_window[3]));
has_display_window = true;
}
} else if (attr_name.compare("lineOrder") == 0) {
if (data.size() >= 1) {
info->line_order = static_cast<int>(data[0]);
has_line_order = true;
}
} else if (attr_name.compare("pixelAspectRatio") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio));
has_pixel_aspect_ratio = true;
}
} else if (attr_name.compare("screenWindowCenter") == 0) {
if (data.size() >= 8) {
memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[0]));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_center[1]));
has_screen_window_center = true;
}
} else if (attr_name.compare("screenWindowWidth") == 0) {
if (data.size() >= sizeof(float)) {
memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
tinyexr::swap4(
reinterpret_cast<unsigned int *>(&info->screen_window_width));
has_screen_window_width = true;
}
} else if (attr_name.compare("chunkCount") == 0) {
if (data.size() >= sizeof(int)) {
memcpy(&info->chunk_count, &data.at(0), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count));
}
} else {
// Custom attribute (up to TINYEXR_MAX_ATTRIBUTES)
if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) {
EXRAttribute attrib;
#ifdef _MSC_VER
strncpy_s(attrib.name, attr_name.c_str(), 255);
strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
strncpy(attrib.name, attr_name.c_str(), 255);
strncpy(attrib.type, attr_type.c_str(), 255);
#endif
attrib.name[255] = '\0';
attrib.type[255] = '\0';
attrib.size = static_cast<int>(data.size());
attrib.value = static_cast<unsigned char *>(malloc(data.size()));
memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
data.size());
info->attributes.push_back(attrib);
}
}
}
// Check if required attributes exist
{
std::stringstream ss_err;
if (!has_compression) {
ss_err << "\"compression\" attribute not found in the header."
<< std::endl;
}
if (!has_channels) {
ss_err << "\"channels\" attribute not found in the header." << std::endl;
}
if (!has_line_order) {
ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
}
if (!has_display_window) {
ss_err << "\"displayWindow\" attribute not found in the header."
<< std::endl;
}
if (!has_data_window) {
ss_err << "\"dataWindow\" attribute not found in the header or invalid."
<< std::endl;
}
if (!has_pixel_aspect_ratio) {
ss_err << "\"pixelAspectRatio\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_width) {
ss_err << "\"screenWindowWidth\" attribute not found in the header."
<< std::endl;
}
if (!has_screen_window_center) {
ss_err << "\"screenWindowCenter\" attribute not found in the header."
<< std::endl;
}
if (!(ss_err.str().empty())) {
if (err) {
(*err) += ss_err.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
}
info->header_len = static_cast<unsigned int>(orig_size - size);
return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
exr_header->screen_window_center[0] = info.screen_window_center[0];
exr_header->screen_window_center[1] = info.screen_window_center[1];
exr_header->screen_window_width = info.screen_window_width;
exr_header->chunk_count = info.chunk_count;
exr_header->display_window[0] = info.display_window[0];
exr_header->display_window[1] = info.display_window[1];
exr_header->display_window[2] = info.display_window[2];
exr_header->display_window[3] = info.display_window[3];
exr_header->data_window[0] = info.data_window[0];
exr_header->data_window[1] = info.data_window[1];
exr_header->data_window[2] = info.data_window[2];
exr_header->data_window[3] = info.data_window[3];
exr_header->line_order = info.line_order;
exr_header->compression_type = info.compression_type;
exr_header->tile_size_x = info.tile_size_x;
exr_header->tile_size_y = info.tile_size_y;
exr_header->tile_level_mode = info.tile_level_mode;
exr_header->tile_rounding_mode = info.tile_rounding_mode;
exr_header->num_channels = static_cast<int>(info.channels.size());
exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
// manually add '\0' for safety.
exr_header->channels[c].name[255] = '\0';
exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
exr_header->channels[c].p_linear = info.channels[c].p_linear;
exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
}
exr_header->pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->pixel_types[c] = info.channels[c].pixel_type;
}
// Initially fill with values of `pixel_types`
exr_header->requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
}
assert(info.attributes.size() < TINYEXR_MAX_ATTRIBUTES);
exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
for (size_t i = 0; i < info.attributes.size(); i++) {
memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256);
memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256);
exr_header->custom_attributes[i].size = info.attributes[i].size;
// Just copy the pointer.
exr_header->custom_attributes[i].value = info.attributes[i].value;
}
exr_header->header_len = info.header_len;
}
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
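  // Scanlines per compressed chunk, following the OpenEXR format: 1 for
  // NONE/RLE/ZIPS, 16 for ZIP, 32 for PIZ. ZFP is a tinyexr extension that
  // also uses 16 lines per chunk here.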
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels);
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
    calloc(static_cast<size_t>(num_tiles), sizeof(EXRTile)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
exr_header->tile_size_x, exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
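      // sizeof(int) * 5 == 20 bytes: the 16-byte tile coordinates plus the
      // 4-byte data size must fit inside the file before the payload starts.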
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size - size_t(offsets[tile_idx] + sizeof(int) * 5);
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
    }
    exr_image->num_tiles = static_cast<int>(num_tiles);
} else { // scanline format
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size - size_t(offsets[y_idx] + sizeof(int) * 2);
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else {
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
// assert(num_lines > 0);
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
line_no -= exr_header->data_window[1];
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
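// Reconstructs the scanline offset table by walking the chunks sequentially:
// each chunk starts with its y coordinate (4 bytes) and payload size
// (4 bytes), so the next chunk begins data_len + 8 bytes further on.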
static bool ReconstructLineOffsets(
std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
const unsigned char *head, const unsigned char *marker, const size_t size) {
assert(head < marker);
assert(offsets->size() == n);
for (size_t i = 0; i < n; i++) {
size_t offset = static_cast<size_t>(marker - head);
// Offset should not exceed whole EXR file/data size.
if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
return false;
}
int y;
unsigned int data_len;
memcpy(&y, marker, sizeof(int));
memcpy(&data_len, marker + 4, sizeof(unsigned int));
if (data_len >= size) {
return false;
}
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
(*offsets)[i] = offset;
marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len)
}
return true;
}
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0];
if (data_width >= std::numeric_limits<int>::max()) {
// Issue 63
if (err) {
(*err) = "Invalid data window value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
data_width++;
int data_height = exr_header->data_window[3] - exr_header->data_window[1];
if (data_height >= std::numeric_limits<int>::max()) {
if (err) {
(*err) = "Invalid data height value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
data_height++;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
(*err) = "Invalid data window value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Read offset tables.
size_t num_blocks = 0;
if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
} else if (exr_header->tiled) {
// @todo { LoD }
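    // Tile count per axis is ceil(data size / tile size); e.g. a
    // 1000-pixel-wide image with 64-pixel tiles needs 16 tiles per row
    // (15 * 64 = 960 < 1000).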
size_t num_x_tiles = static_cast<size_t>(data_width) /
static_cast<size_t>(exr_header->tile_size_x);
if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
static_cast<size_t>(data_width)) {
num_x_tiles++;
}
size_t num_y_tiles = static_cast<size_t>(data_height) /
static_cast<size_t>(exr_header->tile_size_y);
if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
static_cast<size_t>(data_height)) {
num_y_tiles++;
}
num_blocks = num_x_tiles * num_y_tiles;
} else {
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
}
std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
if (err) {
(*err) = "Invalid offset value.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
} else {
if (err) {
(*err) = "Cannot reconstruct lineOffset table.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
return DecodeChunk(exr_image, exr_header, offsets, head, size);
}
} // namespace tinyexr
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
if (out_rgba == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
InitEXRImage(&exr_image);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
if (err) {
(*err) = "Loading multipart or DeepImage is not supported yet.\n";
}
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
{
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
{
int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
    // Alpha channel only (the "A" channel is the file's first channel).
    if (exr_header.tiled) {
      // @todo { Implement the tiled path for alpha-only images. }
    }
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
(*out_rgba)[4 * i + 0] = val;
(*out_rgba)[4 * i + 1] = val;
(*out_rgba)[4 * i + 2] = val;
(*out_rgba)[4 * i + 3] = val;
}
} else {
// Assume RGB(A)
if (idxR == -1) {
if (err) {
(*err) = "R channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
if (err) {
(*err) = "G channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
if (err) {
(*err) = "B channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
if (exr_header.tiled) {
for (int it = 0; it < exr_image.num_tiles; it++) {
for (int j = 0; j < exr_header.tile_size_y; j++)
for (int i = 0; i < exr_header.tile_size_x; i++) {
const int ii =
exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
const int jj =
exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
const int idx = ii + jj * exr_image.width;
// out of region check.
if (ii >= exr_image.width) {
continue;
}
if (jj >= exr_image.height) {
continue;
}
const int srcIdx = i + j * exr_header.tile_size_x;
unsigned char **src = exr_image.tiles[it].images;
(*out_rgba)[4 * idx + 0] =
reinterpret_cast<float **>(src)[idxR][srcIdx];
(*out_rgba)[4 * idx + 1] =
reinterpret_cast<float **>(src)[idxG][srcIdx];
(*out_rgba)[4 * idx + 2] =
reinterpret_cast<float **>(src)[idxB][srcIdx];
if (idxA != -1) {
(*out_rgba)[4 * idx + 3] =
reinterpret_cast<float **>(src)[idxA][srcIdx];
} else {
(*out_rgba)[4 * idx + 3] = 1.0f;
}
}
}
} else {
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0f;
}
}
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_header == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err && !err_str.empty()) {
#ifdef _WIN32
(*err) = _strdup(err_str.c_str()); // May leak
#else
(*err) = strdup(err_str.c_str()); // May leak
#endif
}
}
ConvertHeader(exr_header, info);
// Transfer `tiled` from the version header.
exr_header->tiled = version->tiled;
return ret;
}
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err) {
if (out_rgba == NULL || memory == NULL) {
if (err) {
(*err) = "Invalid argument.\n";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
EXRVersion exr_version;
EXRImage exr_image;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) {
if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
}
}
InitEXRImage(&exr_image);
ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
// RGBA
int idxR = -1;
int idxG = -1;
int idxB = -1;
int idxA = -1;
for (int c = 0; c < exr_header.num_channels; c++) {
if (strcmp(exr_header.channels[c].name, "R") == 0) {
idxR = c;
} else if (strcmp(exr_header.channels[c].name, "G") == 0) {
idxG = c;
} else if (strcmp(exr_header.channels[c].name, "B") == 0) {
idxB = c;
} else if (strcmp(exr_header.channels[c].name, "A") == 0) {
idxA = c;
}
}
if (idxR == -1) {
if (err) {
(*err) = "R channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxG == -1) {
if (err) {
(*err) = "G channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
if (idxB == -1) {
if (err) {
(*err) = "B channel not found\n";
}
// @todo { free exr_image }
return TINYEXR_ERROR_INVALID_DATA;
}
(*out_rgba) = reinterpret_cast<float *>(
malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
static_cast<size_t>(exr_image.height)));
for (int i = 0; i < exr_image.width * exr_image.height; i++) {
(*out_rgba)[4 * i + 0] =
reinterpret_cast<float **>(exr_image.images)[idxR][i];
(*out_rgba)[4 * i + 1] =
reinterpret_cast<float **>(exr_image.images)[idxG][i];
(*out_rgba)[4 * i + 2] =
reinterpret_cast<float **>(exr_image.images)[idxB][i];
if (idxA != -1) {
(*out_rgba)[4 * i + 3] =
reinterpret_cast<float **>(exr_image.images)[idxA][i];
} else {
(*out_rgba)[4 * i + 3] = 1.0f;
}
}
(*width) = exr_image.width;
(*height) = exr_image.height;
FreeEXRHeader(&exr_header);
FreeEXRImage(&exr_image);
return TINYEXR_SUCCESS;
}
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
    if (ret != filesize) {
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
}
return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
err);
}
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *memory, const size_t size,
const char **err) {
if (exr_image == NULL || memory == NULL ||
(size < tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->header_len == 0) {
if (err) {
(*err) = "EXRHeader is not initialized.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
const unsigned char *head = memory;
const unsigned char *marker = reinterpret_cast<const unsigned char *>(
memory + exr_header->header_len +
8); // +8 for magic number + version header.
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
err);
}
size_t SaveEXRImageToMemory(const EXRImage *exr_image,
const EXRHeader *exr_header,
unsigned char **memory_out, const char **err) {
if (exr_image == NULL || memory_out == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return 0; // @fixme
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
return 0;
}
#endif
#if TINYEXR_USE_ZFP
for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) {
if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) {
if (err) {
(*err) = "Pixel type must be FLOAT for ZFP compression.";
}
return 0;
}
}
#endif
std::vector<unsigned char> memory;
// Header
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
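    // The EXR magic number 20000630 (0x01312F76), stored little-endian.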
memory.insert(memory.end(), header, header + 4);
}
// Version, scanline.
{
char marker[] = {2, 0, 0, 0};
/* @todo
if (exr_header->tiled) {
marker[1] |= 0x2;
}
if (exr_header->long_name) {
marker[1] |= 0x4;
}
if (exr_header->non_image) {
marker[1] |= 0x8;
}
if (exr_header->multipart) {
marker[1] |= 0x10;
}
*/
memory.insert(memory.end(), marker, marker + 4);
}
int num_scanlines = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanlines = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanlines = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanlines = 16;
}
// Write attributes.
std::vector<tinyexr::ChannelInfo> channels;
{
std::vector<unsigned char> data;
for (int c = 0; c < exr_header->num_channels; c++) {
tinyexr::ChannelInfo info;
info.p_linear = 0;
info.pixel_type = exr_header->requested_pixel_types[c];
info.x_sampling = 1;
info.y_sampling = 1;
info.name = std::string(exr_header->channels[c].name);
channels.push_back(info);
}
tinyexr::WriteChannelInfo(data, channels);
tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
static_cast<int>(data.size()));
}
{
int comp = exr_header->compression_type;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
tinyexr::WriteAttributeToMemory(
&memory, "compression", "compression",
reinterpret_cast<const unsigned char *>(&comp), 1);
}
{
int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
tinyexr::WriteAttributeToMemory(
&memory, "dataWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
tinyexr::WriteAttributeToMemory(
&memory, "displayWindow", "box2i",
reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
}
{
unsigned char line_order = 0; // @fixme { read line_order from EXRHeader }
tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
&line_order, 1);
}
{
float aspectRatio = 1.0f;
tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
tinyexr::WriteAttributeToMemory(
&memory, "pixelAspectRatio", "float",
reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
}
{
float center[2] = {0.0f, 0.0f};
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(¢er[1]));
tinyexr::WriteAttributeToMemory(
&memory, "screenWindowCenter", "v2f",
reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
}
{
float w = static_cast<float>(exr_image->width);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
reinterpret_cast<const unsigned char *>(&w),
sizeof(float));
}
// Custom attributes
if (exr_header->num_custom_attributes > 0) {
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
tinyexr::WriteAttributeToMemory(
&memory, exr_header->custom_attributes[i].name,
exr_header->custom_attributes[i].type,
reinterpret_cast<const unsigned char *>(
exr_header->custom_attributes[i].value),
exr_header->custom_attributes[i].size);
}
}
{ // end of header
unsigned char e = 0;
memory.push_back(e);
}
int num_blocks = exr_image->height / num_scanlines;
if (num_blocks * num_scanlines < exr_image->height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));
size_t headerSize = memory.size();
tinyexr::tinyexr_uint64 offset =
headerSize +
static_cast<size_t>(num_blocks) *
sizeof(
tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable)
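  // File layout from here: header attributes, then num_blocks 8-byte chunk
  // offsets, then the chunk data itself. The first chunk therefore starts at
  // headerSize + num_blocks * 8, the initial value of `offset`.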
std::vector<unsigned char> data;
std::vector<std::vector<unsigned char> > data_list(
static_cast<size_t>(num_blocks));
std::vector<size_t> channel_offset_list(
static_cast<size_t>(exr_header->num_channels));
int pixel_data_size = 0;
size_t channel_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
channel_offset_list[c] = channel_offset;
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
pixel_data_size += sizeof(unsigned short);
channel_offset += sizeof(unsigned short);
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
pixel_data_size += sizeof(float);
channel_offset += sizeof(float);
} else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
pixel_data_size += sizeof(unsigned int);
channel_offset += sizeof(unsigned int);
} else {
assert(0);
}
}
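  // `pixel_data_size` is now the byte footprint of one pixel across all
  // output channels (e.g. three HALF channels -> 6 bytes), and
  // channel_offset_list[c] is channel c's byte offset within that footprint.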
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
  // Use the ZFP compression parameter from custom attributes (if such a
  // parameter exists).
{
bool ret = tinyexr::FindZFPCompressionParam(
&zfp_compression_param, exr_header->custom_attributes,
exr_header->num_custom_attributes);
if (!ret) {
// Use predefined compression parameter.
zfp_compression_param.type = 0;
zfp_compression_param.rate = 2;
}
}
#endif
// Use a signed int loop counter since some OpenMP compilers don't allow an
// unsigned type for `parallel for`.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < num_blocks; i++) {
size_t ii = static_cast<size_t>(i);
int start_y = num_scanlines * i;
int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
int h = endY - start_y;
std::vector<unsigned char> buf(
static_cast<size_t>(exr_image->width * h * pixel_data_size));
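    // `buf` stores this chunk line-planar: scanline y starts at
    // y * width * pixel_data_size, and within a line channel c starts at
    // channel_offset_list[c] * width (matching the reader's layout).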
for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP16 h16;
h16.u = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP32 f32 = half_to_float(h16);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = f32.f;
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
unsigned short val = reinterpret_cast<unsigned short **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap2(&val);
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
tinyexr::FP32 f32;
f32.f = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::FP16 h16;
h16 = float_to_half_full(f32);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
// Assume increasing Y
unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&buf.at(static_cast<size_t>(pixel_data_size * y *
exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = h16.u;
}
}
} else if (exr_header->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
float val = reinterpret_cast<float **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
// Assume increasing Y
float *line_ptr = reinterpret_cast<float *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
} else {
assert(0);
}
} else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < exr_image->width; x++) {
unsigned int val = reinterpret_cast<unsigned int **>(
exr_image->images)[c][(y + start_y) * exr_image->width + x];
tinyexr::swap4(&val);
// Assume increasing Y
unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
static_cast<size_t>(pixel_data_size * y * exr_image->width) +
channel_offset_list[c] *
static_cast<size_t>(exr_image->width)));
line_ptr[x] = val;
}
}
}
}
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(buf.size());
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), buf.begin(),
buf.begin() + data_len);
} else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
static_cast<unsigned long>(buf.size())));
#else
std::vector<unsigned char> block(
compressBound(static_cast<uLong>(buf.size())));
#endif
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressZip(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// (buf.size() * 3) / 2 would be enough.
std::vector<unsigned char> block((buf.size() * 3) / 2);
tinyexr::tinyexr_uint64 outSize = block.size();
tinyexr::CompressRle(&block.at(0), outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
static_cast<unsigned long>(buf.size()));
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = static_cast<unsigned int>(outSize); // truncate
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
unsigned int bufLen =
1024 + static_cast<unsigned int>(
1.2 * static_cast<unsigned int>(
buf.size())); // @fixme { compute good bound. }
std::vector<unsigned char> block(bufLen);
unsigned int outSize = static_cast<unsigned int>(block.size());
CompressPiz(&block.at(0), &outSize,
reinterpret_cast<const unsigned char *>(&buf.at(0)),
buf.size(), channels, exr_image->width, h);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
std::vector<unsigned char> block;
unsigned int outSize;
tinyexr::CompressZfp(
&block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
exr_image->width, h, exr_header->num_channels, zfp_compression_param);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(compressed)
std::vector<unsigned char> header(8);
unsigned int data_len = outSize;
memcpy(&header.at(0), &start_y, sizeof(int));
memcpy(&header.at(4), &data_len, sizeof(unsigned int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
data_list[ii].insert(data_list[ii].end(), block.begin(),
block.begin() + data_len);
#else
assert(0);
#endif
} else {
assert(0);
}
} // omp parallel
for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
data.insert(data.end(), data_list[i].begin(), data_list[i].end());
offsets[i] = offset;
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
offset += data_list[i].size();
}
{
memory.insert(
memory.end(), reinterpret_cast<unsigned char *>(&offsets.at(0)),
reinterpret_cast<unsigned char *>(&offsets.at(0)) +
sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks));
}
{ memory.insert(memory.end(), data.begin(), data.end()); }
assert(memory.size() > 0);
(*memory_out) = static_cast<unsigned char *>(malloc(memory.size()));
memcpy((*memory_out), &memory.at(0), memory.size());
return memory.size(); // OK
}
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
const char *filename, const char **err) {
if (exr_image == NULL || filename == NULL ||
exr_header->compression_type < 0) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#if !TINYEXR_USE_PIZ
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "PIZ compression is not supported in this build.";
}
return 0;
}
#endif
#if !TINYEXR_USE_ZFP
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
if (err) {
(*err) = "ZFP compression is not supported in this build.";
}
return 0;
}
#endif
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "wb");
#else
FILE *fp = fopen(filename, "wb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot write a file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
unsigned char *mem = NULL;
size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
if ((mem_size > 0) && mem) {
fwrite(mem, 1, mem_size, fp);
}
free(mem);
fclose(fp);
return TINYEXR_SUCCESS;
}
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
if (deep_image == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _MSC_VER
FILE *fp = NULL;
errno_t errcode = fopen_s(&fp, filename, "rb");
if (errcode || (!fp)) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#else
FILE *fp = fopen(filename, "rb");
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
#endif
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (filesize == 0) {
fclose(fp);
if (err) {
(*err) = "File size is zero.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
std::vector<char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
    if (ret != filesize) {
      fclose(fp);
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
}
fclose(fp);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
if (err) {
(*err) = "Invalid magic number.";
}
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
// Version, scanline.
{
    // ver 2.0, scanline, deep bit on (0x800)
    // must be [2, 8, 0, 0]
if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
if (err) {
(*err) = "Unsupported version or scanline.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int num_scanline_blocks = 1; // 16 for ZIP compression.
int compression_type = -1;
int num_channels = -1;
std::vector<tinyexr::ChannelInfo> channels;
// Read attributes
size_t size = filesize - tinyexr::kEXRVersionSize;
for (;;) {
if (0 == size) {
return TINYEXR_ERROR_INVALID_DATA;
} else if (marker[0] == '\0') {
marker++;
size--;
break;
}
std::string attr_name;
std::string attr_type;
std::vector<unsigned char> data;
size_t marker_size;
if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
marker, size)) {
return TINYEXR_ERROR_INVALID_DATA;
}
marker += marker_size;
size -= marker_size;
if (attr_name.compare("compression") == 0) {
compression_type = data[0];
if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "Unsupported compression type.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
} else if (attr_name.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
if (!tinyexr::ReadChannelInfo(channels, data)) {
if (err) {
(*err) = "Failed to parse channel info.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
num_channels = static_cast<int>(channels.size());
if (num_channels < 1) {
if (err) {
(*err) = "Invalid channels format.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
} else if (attr_name.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
} else if (attr_name.compare("displayWindow") == 0) {
int x;
int y;
int w;
int h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
}
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(num_channels >= 1);
int data_width = dw - dx + 1;
int data_height = dh - dy + 1;
std::vector<float> image(
static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA
// Read offset tables.
int num_blocks = data_height / num_scanline_blocks;
if (num_blocks * num_scanline_blocks < data_height) {
num_blocks++;
}
std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
tinyexr::tinyexr_int64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
marker += sizeof(tinyexr::tinyexr_int64); // = 8
offsets[y] = offset;
}
#if TINYEXR_USE_PIZ
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
(compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
// OK
} else {
if (err) {
(*err) = "Unsupported format.";
}
return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
}
deep_image->image = static_cast<float ***>(
malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
for (int c = 0; c < num_channels; c++) {
deep_image->image[c] = static_cast<float **>(
malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // Per-scanline sample arrays (deep_image->image[c][y]) are allocated
    // later, in the decode loop, once each line's sample count is known.
}
deep_image->offset_table = static_cast<int **>(
malloc(sizeof(int *) * static_cast<size_t>(data_height)));
for (int y = 0; y < data_height; y++) {
deep_image->offset_table[y] = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(data_width)));
}
for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// int: y coordinate
// int64: packed size of pixel offset table
// int64: packed size of sample data
// int64: unpacked size of sample data
// compressed pixel offset table
// compressed sample data
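    // The fixed-size part above is 28 bytes (4 + 8 + 8 + 8), so the packed
    // pixel offset table begins at data_ptr + 28.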
int line_no;
tinyexr::tinyexr_int64 packedOffsetTableSize;
tinyexr::tinyexr_int64 packedSampleDataSize;
tinyexr::tinyexr_int64 unpackedSampleDataSize;
memcpy(&line_no, data_ptr, sizeof(int));
memcpy(&packedOffsetTableSize, data_ptr + 4,
sizeof(tinyexr::tinyexr_int64));
memcpy(&packedSampleDataSize, data_ptr + 12,
sizeof(tinyexr::tinyexr_int64));
memcpy(&unpackedSampleDataSize, data_ptr + 20,
sizeof(tinyexr::tinyexr_int64));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
tinyexr::swap8(
reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
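    // In OpenEXR's deep scanline layout the offset table entries are
    // cumulative sample counts per pixel, which is what the size check
    // against the table's last entry further below assumes.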
// decode pixel offset table.
{
unsigned long dstLen =
static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
&dstLen, data_ptr + 28,
static_cast<unsigned long>(packedOffsetTableSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == pixelOffsetTable.size() * sizeof(int));
for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
deep_image->offset_table[y][i] = pixelOffsetTable[i];
}
}
std::vector<unsigned char> sample_data(
static_cast<size_t>(unpackedSampleDataSize));
// decode sample data.
{
unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
if (dstLen) {
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
data_ptr + 28 + packedOffsetTableSize,
static_cast<unsigned long>(packedSampleDataSize))) {
return TINYEXR_ERROR_INVALID_DATA;
}
assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
}
}
// decode sample
int sampleSize = -1;
std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
{
int channel_offset = 0;
for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
channel_offset_list[i] = channel_offset;
if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT
channel_offset += 4;
} else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half
channel_offset += 2;
} else if (channels[i].pixel_type ==
TINYEXR_PIXELTYPE_FLOAT) { // float
channel_offset += 4;
} else {
assert(0);
}
}
sampleSize = channel_offset;
}
assert(sampleSize >= 2);
assert(static_cast<size_t>(
pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
sampleSize) == sample_data.size());
int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
//
// Alloc memory
//
//
// pixel data is stored as image[channels][pixel_samples]
//
{
tinyexr::tinyexr_uint64 data_offset = 0;
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
deep_image->image[c][y] = static_cast<float *>(
malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
if (channels[c].pixel_type == 0) { // UINT
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
unsigned int ui = *reinterpret_cast<unsigned int *>(
&sample_data.at(size_t(data_offset) + x * sizeof(int)));
deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme
}
data_offset +=
sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
} else if (channels[c].pixel_type == 1) { // half
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
tinyexr::FP16 f16;
f16.u = *reinterpret_cast<unsigned short *>(
&sample_data.at(size_t(data_offset) + x * sizeof(short)));
tinyexr::FP32 f32 = half_to_float(f16);
deep_image->image[c][y][x] = f32.f;
}
data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
} else { // float
for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
float f = *reinterpret_cast<float *>(
&sample_data.at(size_t(data_offset) + x * sizeof(float)));
deep_image->image[c][y][x] = f;
}
data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
}
}
}
} // y
deep_image->width = data_width;
deep_image->height = data_height;
deep_image->channel_names = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
deep_image->num_channels = num_channels;
return TINYEXR_SUCCESS;
}
void InitEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return;
}
exr_image->width = 0;
exr_image->height = 0;
exr_image->num_channels = 0;
exr_image->images = NULL;
exr_image->tiles = NULL;
exr_image->num_tiles = 0;
}
void InitEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return;
}
memset(exr_header, 0, sizeof(EXRHeader));
}
int FreeEXRHeader(EXRHeader *exr_header) {
if (exr_header == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (exr_header->channels) {
free(exr_header->channels);
}
if (exr_header->pixel_types) {
free(exr_header->pixel_types);
}
if (exr_header->requested_pixel_types) {
free(exr_header->requested_pixel_types);
}
for (int i = 0; i < exr_header->num_custom_attributes; i++) {
if (exr_header->custom_attributes[i].value) {
free(exr_header->custom_attributes[i].value);
}
}
return TINYEXR_SUCCESS;
}
int FreeEXRImage(EXRImage *exr_image) {
if (exr_image == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->images && exr_image->images[i]) {
free(exr_image->images[i]);
}
}
if (exr_image->images) {
free(exr_image->images);
}
if (exr_image->tiles) {
for (int tid = 0; tid < exr_image->num_tiles; tid++) {
for (int i = 0; i < exr_image->num_channels; i++) {
if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
free(exr_image->tiles[tid].images[i]);
}
}
if (exr_image->tiles[tid].images) {
free(exr_image->tiles[tid].images);
}
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_header == NULL || exr_version == NULL || filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
err);
}
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
int *num_headers,
const EXRVersion *exr_version,
const unsigned char *memory, size_t size,
const char **err) {
if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
exr_version == NULL) {
// Invalid argument
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
size_t marker_size = size - tinyexr::kEXRVersionSize;
std::vector<tinyexr::HeaderInfo> infos;
for (;;) {
tinyexr::HeaderInfo info;
info.clear();
std::string err_str;
bool empty_header = false;
int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
marker, marker_size);
if (ret != TINYEXR_SUCCESS) {
if (err) {
#ifdef _WIN32
(*err) = _strdup(err_str.c_str()); // may leak
#else
(*err) = strdup(err_str.c_str()); // may leak
#endif
}
return ret;
}
if (empty_header) {
marker += 1; // skip '\0'
break;
}
// `chunkCount` must exist in the header.
if (info.chunk_count == 0) {
if (err) {
(*err) = "`chunkCount' attribute is not found in the header.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
infos.push_back(info);
// move to next header.
marker += info.header_len;
marker_size -= info.header_len; // keep the remaining byte count in sync with `marker`
}
// allocate memory for EXRHeader and create array of EXRHeader pointers.
(*exr_headers) =
static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
for (size_t i = 0; i < infos.size(); i++) {
EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
ConvertHeader(exr_header, infos[i]);
// transfer `tiled` from version.
exr_header->tiled = exr_version->tiled;
(*exr_headers)[i] = exr_header;
}
(*num_headers) = static_cast<int>(infos.size());
return TINYEXR_SUCCESS;
}
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
const EXRVersion *exr_version,
const char *filename, const char **err) {
if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return ParseEXRMultipartHeaderFromMemory(
exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
size_t size) {
if (version == NULL || memory == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
if (size < tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_DATA;
}
const unsigned char *marker = memory;
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
}
marker += 4;
}
version->tiled = false;
version->long_name = false;
version->non_image = false;
version->multipart = false;
// Parse version header.
{
// must be 2
if (marker[0] != 2) {
return TINYEXR_ERROR_INVALID_EXR_VERSION;
}
// (`version` was already validated to be non-NULL above.)
version->version = 2;
if (marker[1] & 0x2) { // 9th bit
version->tiled = true;
}
if (marker[1] & 0x4) { // 10th bit
version->long_name = true;
}
if (marker[1] & 0x8) { // 11th bit
version->non_image = true; // (deep image)
}
if (marker[1] & 0x10) { // 12th bit
version->multipart = true;
}
}
return TINYEXR_SUCCESS;
}
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
if (filename == NULL) {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t file_size;
// Compute size
fseek(fp, 0, SEEK_END);
file_size = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
if (file_size < tinyexr::kEXRVersionSize) {
fclose(fp);
return TINYEXR_ERROR_INVALID_FILE;
}
unsigned char buf[tinyexr::kEXRVersionSize];
size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
fclose(fp);
if (ret != tinyexr::kEXRVersionSize) {
return TINYEXR_ERROR_INVALID_FILE;
}
return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
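// Minimal usage sketch for the version/header parsers above (illustrative
// only; the file name and error handling are placeholders, not part of the
// library):
//
// EXRVersion version;
// if (ParseEXRVersionFromFile(&version, "input.exr") == TINYEXR_SUCCESS) {
//   EXRHeader header;
//   InitEXRHeader(&header);
//   const char *err = NULL;
//   if (ParseEXRHeaderFromFile(&header, &version, "input.exr", &err) ==
//       TINYEXR_SUCCESS) {
//     // ... inspect header.channels, header.num_channels, ...
//     FreeEXRHeader(&header);
//   }
// }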
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
if (err) {
(*err) = "EXRHeader is not initialized.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In a multipart image, there is a 'part number' before each chunk's data.
// 4 bytes : part number
// 4+      : chunk
//
// NOTE 2:
// The EXR spec says 'part number' is 'unsigned long', but it is actually an
// 'unsigned int' (4 bytes) in the OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
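// Overall multipart layout sketch (sizes in bytes; derived from the notes
// above and the parsing code below):
//
// [magic:4][version:4]
// [header_0] ... [header_{n-1}] [0x00 terminator]
// [offset table for part 0] ... [offset table for part n-1]
// [part number:4][chunk data] ...   (one entry per chunk)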
// Load chunk offset table.
std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> offset_table(
static_cast<size_t>(exr_headers[i]->chunk_count));
for (size_t c = 0; c < offset_table.size(); c++) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
if (offset >= size) {
if (err) {
(*err) = "Invalid offset size.";
}
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
chunk_offset_table_list.push_back(offset_table);
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
std::vector<tinyexr::tinyexr_uint64> &offset_table =
chunk_offset_table_list[i];
// First check that the 'part number' is identical to 'i'.
for (size_t c = 0; c < offset_table.size(); c++) {
const unsigned char *part_number_addr =
memory + offset_table[c] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
assert(0);
return TINYEXR_ERROR_INVALID_DATA;
}
}
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
memory, size);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
}
return TINYEXR_SUCCESS;
}
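// Typical multipart call sequence (sketch; buffer acquisition, error
// handling, and cleanup are elided):
//
// EXRVersion version;
// ParseEXRVersionFromMemory(&version, memory, size);
// EXRHeader **headers = NULL;  // freed by the caller
// int num_parts = 0;
// const char *err = NULL;
// ParseEXRMultipartHeaderFromMemory(&headers, &num_parts, &version, memory,
//                                   size, &err);
// std::vector<EXRImage> images(num_parts);
// for (int i = 0; i < num_parts; i++) InitEXRImage(&images[i]);
// LoadEXRMultipartImageFromMemory(&images[0],
//                                 const_cast<const EXRHeader **>(headers),
//                                 num_parts, memory, size, &err);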
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts, const char *filename,
const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
filename == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
#ifdef _WIN32
FILE *fp = NULL;
fopen_s(&fp, filename, "rb");
#else
FILE *fp = fopen(filename, "rb");
#endif
if (!fp) {
if (err) {
(*err) = "Cannot read file.";
}
return TINYEXR_ERROR_CANT_OPEN_FILE;
}
size_t filesize;
// Compute size
fseek(fp, 0, SEEK_END);
filesize = static_cast<size_t>(ftell(fp));
fseek(fp, 0, SEEK_SET);
std::vector<unsigned char> buf(filesize); // @todo { use mmap }
{
size_t ret;
ret = fread(&buf[0], 1, filesize, fp);
assert(ret == filesize);
fclose(fp);
if (ret != filesize) {
if (err) {
(*err) = "fread error.";
}
return TINYEXR_ERROR_INVALID_FILE;
}
}
return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
&buf.at(0), filesize, err);
}
int SaveEXR(const float *data, int width, int height, int components,
const int save_as_fp16, const char *outfilename) {
if (components == 1 || components == 3 || components == 4) {
// OK
} else {
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// Assume at least 16x16 pixels.
if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT;
EXRHeader header;
InitEXRHeader(&header);
EXRImage image;
InitEXRImage(&image);
image.num_channels = components;
std::vector<float> images[4];
if (components == 1) {
images[0].resize(static_cast<size_t>(width * height));
memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
} else {
images[0].resize(static_cast<size_t>(width * height));
images[1].resize(static_cast<size_t>(width * height));
images[2].resize(static_cast<size_t>(width * height));
images[3].resize(static_cast<size_t>(width * height));
// Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
images[0][i] = data[static_cast<size_t>(components) * i + 0];
images[1][i] = data[static_cast<size_t>(components) * i + 1];
images[2][i] = data[static_cast<size_t>(components) * i + 2];
if (components == 4) {
images[3][i] = data[static_cast<size_t>(components) * i + 3];
}
}
}
float *image_ptr[4] = {0, 0, 0, 0};
if (components == 4) {
image_ptr[0] = &(images[3].at(0)); // A
image_ptr[1] = &(images[2].at(0)); // B
image_ptr[2] = &(images[1].at(0)); // G
image_ptr[3] = &(images[0].at(0)); // R
} else if (components == 3) {
image_ptr[0] = &(images[2].at(0)); // B
image_ptr[1] = &(images[1].at(0)); // G
image_ptr[2] = &(images[0].at(0)); // R
} else if (components == 1) {
image_ptr[0] = &(images[0].at(0)); // A
}
image.images = reinterpret_cast<unsigned char **>(image_ptr);
image.width = width;
image.height = height;
header.num_channels = components;
header.channels = static_cast<EXRChannelInfo *>(malloc(
sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
// Must be (A)BGR order, since most EXR viewers expect this channel order.
if (components == 4) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
strncpy_s(header.channels[1].name, "B", 255);
strncpy_s(header.channels[2].name, "G", 255);
strncpy_s(header.channels[3].name, "R", 255);
#else
strncpy(header.channels[0].name, "A", 255);
strncpy(header.channels[1].name, "B", 255);
strncpy(header.channels[2].name, "G", 255);
strncpy(header.channels[3].name, "R", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
header.channels[1].name[strlen("B")] = '\0';
header.channels[2].name[strlen("G")] = '\0';
header.channels[3].name[strlen("R")] = '\0';
} else if (components == 3) {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "B", 255);
strncpy_s(header.channels[1].name, "G", 255);
strncpy_s(header.channels[2].name, "R", 255);
#else
strncpy(header.channels[0].name, "B", 255);
strncpy(header.channels[1].name, "G", 255);
strncpy(header.channels[2].name, "R", 255);
#endif
header.channels[0].name[strlen("B")] = '\0';
header.channels[1].name[strlen("G")] = '\0';
header.channels[2].name[strlen("R")] = '\0';
} else {
#ifdef _MSC_VER
strncpy_s(header.channels[0].name, "A", 255);
#else
strncpy(header.channels[0].name, "A", 255);
#endif
header.channels[0].name[strlen("A")] = '\0';
}
header.pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
header.requested_pixel_types = static_cast<int *>(
malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
for (int i = 0; i < header.num_channels; i++) {
header.pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
if (save_as_fp16 > 0) {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
} else {
header.requested_pixel_types[i] =
TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
// no precision reduction)
}
}
const char *err = NULL;
int ret = SaveEXRImageToFile(&image, &header, outfilename, &err);
// Free the header allocations on both the success and failure paths.
free(header.channels);
free(header.pixel_types);
free(header.requested_pixel_types);
return ret;
}
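// Usage sketch (the buffer `rgba` and the file name are placeholders):
//
// // save a width x height interleaved RGBA buffer as fp16 EXR
// int ret = SaveEXR(rgba, width, height, 4, /*save_as_fp16=*/1, "out.exr");
// if (ret != TINYEXR_SUCCESS) { /* handle error */ }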
#ifdef __clang__
// zero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEIFNED
#endif // TINYEXR_IMPLEMENTATION
|
GB_unaryop__one_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_fp32_fp32
// op(A') function: GB_tran__one_fp32_fp32
// C type: float
// A type: float
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
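// With the definitions above, GB_CAST_OP(pC,pA) expands (up to empty
// statements) to simply:
//
//    Cx [pC] = 1 ;
//
// since GB_GETA and GB_CASTING are no-ops for this operator.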
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__one_fp32_fp32
(
float *restrict Cx,
const float *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__one_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__trunc_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__trunc_fp64_fp64
// op(A') function: GB_unop_tran__trunc_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = trunc (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = trunc (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = trunc (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__trunc_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = trunc (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__trunc_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
filter.h | #ifndef OPENMC_TALLIES_FILTER_H
#define OPENMC_TALLIES_FILTER_H
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "openmc/hdf5_interface.h"
#include "openmc/particle.h"
#include "pugixml.hpp"
namespace openmc {
//==============================================================================
//! Stores bins and weights for filtered tally events.
//==============================================================================
class FilterMatch
{
public:
std::vector<int> bins_;
std::vector<double> weights_;
};
} // namespace openmc
// Without an explicit instantiation of vector<FilterMatch>, the Intel compiler
// will complain about the threadprivate directive on filter_matches. Note that
// this has to happen *outside* of the openmc namespace
extern template class std::vector<openmc::FilterMatch>;
namespace openmc {
//==============================================================================
//! Modifies tally score events.
//==============================================================================
class Filter
{
public:
virtual ~Filter() = default;
virtual std::string type() const = 0;
//! Uses an XML input to fill the filter's data fields.
virtual void from_xml(pugi::xml_node node) = 0;
//! Matches a tally event to a set of filter bins and weights.
//!
//! \param[out] match will contain the matching bins and corresponding
//! weights; note that there may be zero matching bins
virtual void
get_all_bins(const Particle* p, int estimator, FilterMatch& match) const = 0;
//! Writes data describing this filter to an HDF5 statepoint group.
virtual void
to_statepoint(hid_t filter_group) const
{
write_dataset(filter_group, "type", type());
write_dataset(filter_group, "n_bins", n_bins_);
}
//! Return a string describing a filter bin for the tallies.out file.
//
//! For example, an `EnergyFilter` might return the string
//! "Incoming Energy [0.625E-6, 20.0)".
virtual std::string text_label(int bin) const = 0;
virtual void initialize() {}
int n_bins_;
};
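// Sketch of a minimal concrete subclass (hypothetical; shown only to
// illustrate the virtual interface above, not an actual OpenMC filter):
//
// class UnitFilter : public Filter {
// public:
//   std::string type() const override { return "unit"; }
//   void from_xml(pugi::xml_node node) override { n_bins_ = 1; }
//   void get_all_bins(const Particle* p, int estimator, FilterMatch& match)
//     const override
//   {
//     match.bins_.push_back(0);
//     match.weights_.push_back(1.0);
//   }
//   std::string text_label(int bin) const override { return "Unit"; }
// };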
//==============================================================================
// Global variables
//==============================================================================
namespace simulation {
extern std::vector<FilterMatch> filter_matches;
#pragma omp threadprivate(filter_matches)
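// With threadprivate, each OpenMP thread keeps its own filter_matches
// vector, so concurrent tally scoring needs no synchronization here.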
} // namespace simulation
namespace model {
extern "C" int32_t n_filters;
extern std::vector<std::unique_ptr<Filter>> tally_filters;
} // namespace model
//==============================================================================
extern "C" void free_memory_tally_c();
//==============================================================================
// Filter-related Fortran functions that will be called from C++
extern "C" int verify_filter(int32_t index);
extern "C" Filter* filter_from_f(int32_t index);
extern "C" void filter_update_n_bins(int32_t index);
} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_H
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int8_int8)
// op(A') function: GB (_unop_tran__minv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__minv_int8_int8)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__minv_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
energetics.h | #pragma once
#include <complex>
#include <cassert>
#include "toefl/toefl.h"
#include "blueprint.h"
#include "equations.h"
namespace toefl{
///@addtogroup solvers
///@{
/**
* @brief Blas1 function
*
* computes \f[ y = \alpha x + \beta y \f]
* @param alpha a scalar
* @param x Matrix
* @param beta a scalar
* @param y Matrix (read & write) containing result on output
*/
void axpby(double alpha, const Matrix<double, TL_DFT>& x, double beta, Matrix<double, TL_DFT>& y)
{
for( unsigned i=0; i<x.rows(); i++)
for( unsigned j=0; j<x.cols(); j++)
y(i,j) = alpha*x(i,j)+beta*y(i,j);
}
/**
* @brief Blas1 dot product
*
* Computes \f[\sum_{ij} m_{1ij}m_{2ij}\f]
* @param m1
* @param m2
*
* @return Dot product
*/
double dot( const Matrix<double, TL_DFT>& m1, const Matrix<double, TL_DFT>& m2)
{
double sum = 0;
#pragma omp parallel for reduction(+: sum)
for( unsigned i=0; i<m1.rows(); i++)
for( unsigned j=0; j<m1.cols(); j++)
sum+= m1(i,j)*m2(i,j);
return sum;
}
/**
* @brief Compute dot product
*
* Computes \f[\sum_{i} v_{1i}v^*_{2i}\f]
* where * is the complex conjugate
*
* @param v1
* @param v2
*
* @return
*/
double dot( const std::vector<std::complex<double> >& v1, const std::vector<std::complex<double> >& v2)
{
assert( v1.size() == v2.size());
unsigned cols = v1.size();
std::complex<double> sum=0;
sum += v1[0]*conj( v2[0]); // DC coefficient enters once
for( unsigned j=1; j<cols/2; j++)
sum += 2.*v1[j]*conj( v2[j]); // factor 2: each mode of a real signal has a redundant conjugate partner
if( cols%2)
sum += v1[cols/2]*conj(v2[cols/2]);
else
sum += 2.*v1[cols/2]*conj( v2[cols/2]);
//if( imag(sum) > 1e-12) std::cerr<<sum << "WARNING: Imag not zero!\n";
return real( sum);
}
/**
* @brief Compute dot product
*
* Computes \f[\sum_{i} v_{1i}v_{2i}\f]
*
* @param v1 real vector
* @param v2 real vector
*
* @return dot product
*/
double dot( const std::vector<double>& v1, const std::vector<double>& v2)
{
assert( v1.size() == v2.size());
double sum=0;
for( unsigned i=0; i<v1.size(); i++)
sum += v1[i]*v2[i];
return sum;
}
std::vector<std::complex<double> > extract_sum_y( const Matrix<std::complex<double> >& in)
{
std::vector<std::complex<double> > out( in.cols());
for( unsigned j=0; j<in.cols(); j++)
out[j] = in(0,j);
return out;
}
std::vector<double> extract_sum_y( const Matrix<double, TL_DFT>& in)
{
std::vector<double> out( in.cols(), 0);
for( unsigned i=0; i<in.rows(); i++)
for( unsigned j=0; j<in.cols(); j++)
out[j] += in(i,j);
return out;
}
//remove the real average in y-direction
void remove_average_y( const Matrix<double, TL_DFT>& in, Matrix<double, TL_DFT>& m)
{
m = in;
std::vector<double> average = extract_sum_y( in);
#pragma omp parallel for
for( unsigned i=0; i<in.rows(); i++)
for( unsigned j=0; j<in.cols(); j++)
m(i,j) -= average[j]/in.rows();
}
/**
* @brief Compute y derivative using second-order centered differences (periodic in y)
*
* @param in
* @param out
* @param h grid constant
*/
void dy( const Matrix<double, TL_DFT>& in, Matrix<double, TL_DFT>& out, double h)
{
assert( &in != &out);
unsigned rows = in.rows();
for( unsigned j=0; j<in.cols(); j++)
out(0,j) = (in(1,j) - in(rows-1, j))/2./h;
#pragma omp parallel for
for( unsigned i=1; i<in.rows()-1; i++)
{
for( unsigned j=0; j<in.cols(); j++)
out(i,j) = (in(i+1,j) - in(i-1, j))/2./h;
}
for( unsigned j=0; j<in.cols(); j++)
out(rows-1,j) = (in(0,j) - in(rows-2, j))/2./h;
}
/**
* @brief Compute Energetics of INNTO code
*
* @tparam n the number of equations
*/
template<size_t n>
struct Energetics
{
typedef Matrix<double, TL_DFT> Matrix_Type;
typedef std::complex<double> complex;
Energetics( const Blueprint& bp):
rows( bp.algorithmic().ny ), cols( bp.algorithmic().nx ),
crows( rows), ccols( cols/2+1),
diff_coeff( rows, cols),
a_mu_gamma0_coeff( MatrixArray< double, TL_NONE, n-1>::construct( crows, ccols)),
dens_( MatrixArray<double, TL_DFT,n>::construct( rows, cols)),
phi_( dens_),
cdens_( MatrixArray<complex, TL_NONE, n>::construct( crows, ccols)),
cphi_(cdens_),
blue(bp), phys( bp.physical()), bound( bp.boundary()), alg( bp.algorithmic()),
dft_dft( rows, cols, FFTW_MEASURE),
a_{-1. , phys.a[0], phys.a[1]}, tau_{-1. , phys.tau[0], phys.tau[1]},
g_{phys.g_e, phys.g[0], phys.g[1]}
{
double laplace;
Poisson p( phys);
int ik;
const double kxmin2 = 2.*2.*M_PI*M_PI/(double)(bound.lx*bound.lx),
kymin2 = 2.*2.*M_PI*M_PI/(double)(bound.ly*bound.ly);
for( unsigned i = 0; i<crows; i++)
for( unsigned j = 0; j<ccols; j++)
{
ik = (i>rows/2) ? (i-rows) : i; //map FFT row index to signed wavenumber
laplace = - kxmin2*(double)(j*j) - kymin2*(double)(ik*ik);
diff_coeff(i,j) = -phys.nu*pow(-laplace,2);
if( n==2)
{
a_mu_gamma0_coeff[0](i,j) = -phys.a[0]*phys.mu[0]*laplace*p.gamma0_i( laplace);
}
else if( n==3)
{
a_mu_gamma0_coeff[0](i,j) = -phys.a[0]*phys.mu[0]*laplace*p.gamma0_i( laplace);
a_mu_gamma0_coeff[1](i,j) = -phys.a[1]*phys.mu[1]*laplace*p.gamma0_z( laplace);
}
}
}
std::vector<double> thermal_energies(const std::array<Matrix<double, TL_DFT>, n>& dens_ );
std::vector<double> exb_energies(const Matrix<double, TL_DFT>& phi_ );
std::vector<double> diffusion( const std::array<Matrix<double, TL_DFT>, n>& density , const std::array<Matrix<double, TL_DFT>, n>& potential);
std::vector<double> gradient_flux( const std::array<Matrix<double, TL_DFT>, n>& density , const std::array<Matrix<double, TL_DFT>, n>& potential);
double capital_jot( const Matrix<double, TL_DFT>& density , const Matrix<double, TL_DFT>& potential);
double capital_a( const Matrix<double, TL_DFT>& density , const Matrix<double, TL_DFT>& potential);
private:
unsigned rows, cols;
unsigned crows, ccols;
Matrix<double, TL_DFT> diff_coeff;
std::array< Matrix< double>, n-1> a_mu_gamma0_coeff;
std::array< Matrix<double, TL_DFT>, n > dens_, phi_;
std::array< Matrix< complex>, n> cdens_, cphi_;
Blueprint blue;
Physical phys;
Boundary bound;
Algorithmic alg;
DFT_DFT dft_dft;
double a_[3];
double tau_[3];
double g_[3];
};
///////////////////////////////////////////////////////
///@cond
template<size_t n>
std::vector<double> Energetics<n>::thermal_energies(const std::array<Matrix<double, TL_DFT>, n>& density )
{
std::vector<double> energies(n);
for( unsigned i=0; i<n; i++)
energies[i] = 0.5*a_[i]*tau_[i]*dot( density[i], density[i])*alg.h*alg.h;
return energies;
}
template<size_t n>
std::vector<double> Energetics<n>::exb_energies(const Matrix<double, TL_DFT>& potential )
{
std::vector<double> energies;
double norm = alg.h*alg.h/(double)(rows*cols); //h^2 from integration and 1/NxNy from complex scalar product
phi_[0] = potential;
dft_dft.r2c( phi_[0], cphi_[0]);
for( size_t k=1; k<n; k++)
{
#pragma omp parallel for
for( size_t i = 0; i < crows; i++)
for( size_t j = 0; j < ccols; j++)
cphi_[k](i,j) = (a_mu_gamma0_coeff[k-1](i,j))*cphi_[0](i,j);
energies.push_back( 0.5*dft_dft.dot( cphi_[0], cphi_[k])*norm);
}
//averages
double norm_avg = alg.h*alg.h/(double)(rows)/(double)cols; //h*h from integration, 1/Ny from the average, and 1/Nx from the complex scalar product
std::vector<complex> sum_phi[n];
for( unsigned i=0; i<n; i++)
sum_phi[i] = extract_sum_y( cphi_[i]);
for( size_t k=1; k<n; k++)
energies.push_back( 0.5*dot( sum_phi[0], sum_phi[k])*norm_avg);
return energies;
}
template<size_t n>
double Energetics<n>::capital_jot( const Matrix<double, TL_DFT>& density , const Matrix<double, TL_DFT>& potential)
{
#pragma omp parallel for
for( unsigned i=0; i<rows; i++)
for( unsigned j=0; j<cols; j++)
dens_[0](i,j) = potential(i,j) - density(i,j);
if( blue.isEnabled( TL_MHW) )
remove_average_y( dens_[0], dens_[1]);
return -phys.d*dot( dens_[0], dens_[1])*alg.h*alg.h;
}
template<size_t n>
double Energetics<n>::capital_a( const Matrix<double, TL_DFT>& density , const Matrix<double, TL_DFT>& potential)
{
std::vector<double> sum_dens = extract_sum_y( density);
std::vector<double> sum_phi = extract_sum_y( potential);
double a = dot( sum_phi, sum_phi) - dot(sum_dens, sum_phi);
return -phys.d*a/(double)(rows)*alg.h*alg.h;
}
/////////////////////////////////////////////////////////////////////
template<size_t n>
std::vector<double> Energetics<n>::gradient_flux( const std::array<Matrix<double, TL_DFT>, n>& density , const std::array<Matrix<double, TL_DFT>, n>& potential)
{
std::vector<double> flux;
for( unsigned i=0; i<n; i++)
{
dy( potential[i], dens_[i], alg.h); //dens_ = dy phi_
flux.push_back( g_[i]*a_[i]*tau_[i]*dot( density[i], dens_[i])*alg.h*alg.h);
}
//zonal flow R
double r=0;
for( unsigned k=0; k<n; k++)
{
#pragma omp parallel for
for( unsigned i=0; i<rows;i++)
for( unsigned j=0; j<cols;j++)
dens_[k](i,j) *= density[k](i,j); //dy phi *density
std::vector<double> sum = extract_sum_y( dens_[k]);
//compute dx sum
std::vector<double> vy(sum);
vy[0] = (sum[1]-sum[cols-1])/2./alg.h;
for( unsigned i=1; i<cols-1; i++)
vy[i] = (sum[i+1]-sum[i-1])/2./alg.h;
vy[cols-1] = (sum[0]-sum[cols-2])/2./alg.h;
std::vector<double> sum_phi = extract_sum_y( potential[k]);
r+= a_[k]*dot( vy, sum_phi)*alg.h*alg.h/(double)(rows);
}
flux.push_back(r);
flux.push_back(0);
/*
phi_[0] = potential[0];
dft_dft.r2c( phi_[0], cphi_[0]);
for( size_t k=1; k<n; k++)
{
#pragma omp parallel for
for( size_t i = 0; i < crows; i++)
for( size_t j = 0; j < ccols; j++)
cphi_[k](i,j) = (-a_mu_gamma0_coeff[k-1](i,j))*cphi_[0](i,j)/(double)(rows*cols);
dft_dft.c2r( cphi_[k], phi_[k]);
#pragma omp parallel for
for( unsigned i=0; i<rows;i++)
for( unsigned j=0; j<cols;j++)
dens_[k](i,j) = dens_[0](i,j)*phi_[k](i,j); //dy phi * a/tau*(1-\Gamma_0)phi
std::vector<double> sum = extract_sum_y( dens_[k]);
std::vector<double> sum_phi = extract_sum_y( potential[k]);
//compute dx sum
std::vector<double> vy(sum);
vy[0] = (sum[1]-sum[cols-1])/2./alg.h;
for( unsigned i=1; i<cols-1; i++)
vy[i] = (sum[i+1]-sum[i-1])/2./alg.h;
vy[cols-1] = (sum[0]-sum[cols-2])/2./alg.h;
flux.push_back( dot( vy, sum_phi)*alg.h*alg.h/(double)(rows));
}
*/
return flux;
}
template<size_t n>
std::vector<double> Energetics<n>::diffusion( const std::array<Matrix<double, TL_DFT>, n>& density , const std::array<Matrix<double, TL_DFT>, n>& potential)
{
//compute -nu*laplace^2 density in dens_
dens_ = density;
#pragma omp parallel for
for( unsigned k=0; k<n; k++)
{
dft_dft.r2c( dens_[k], cdens_[k]);
for( size_t i = 0; i < crows; i++)
for( size_t j = 0; j < ccols; j++)
cdens_[k](i,j) = diff_coeff(i,j)*cdens_[k](i,j)/(double)(rows*cols);
dft_dft.c2r( cdens_[k], dens_[k]);
}
//add up diffusion
std::vector<double> diffusion_(2, 0.);
for( unsigned i=0; i<n; i++)
{
diffusion_[0] += a_[i]*dot( potential[i], dens_[i])*alg.h*alg.h;
diffusion_[0] += a_[i]*tau_[i]*dot( density[i], dens_[i])*alg.h*alg.h;
}
//compute mean diffusion
for(unsigned k=0; k<n; k++)
{
std::vector<double> sum_dens = extract_sum_y( dens_[k] );
std::vector<double> sum_phi = extract_sum_y( potential[k] );
diffusion_[1] += a_[k]*dot( sum_dens, sum_phi)*alg.h*alg.h/(double)(rows);
}
return diffusion_;
}
///@endcond
///@}
} //namespace toefl
|
omp_parallel_reduction.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#define DOUBLE_DIGITS 20 /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800 /* 10! */
int test_omp_parallel_reduction()
{
int sum;
int known_sum;
double dsum;
double dknown_sum;
double dt=0.5; /* base of geometric row for + and - test*/
double rounding_error= 1.E-9;
int diff;
double ddiff;
int product;
int known_product;
int logic_and;
int logic_or;
int bit_and;
int bit_or;
int exclusiv_bit_or;
int logics[LOOPCOUNT];
int i;
double dpt;
int result;
sum =0;
dsum=0;
product=1;
logic_and=1;
logic_or=0;
bit_and=1;
bit_or=0;
exclusiv_bit_or=0;
result=0;
dt = 1./3.;
known_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2;
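/* known_sum above is the arithmetic series 1 + 2 + ... + N = N*(N+1)/2
with N = LOOPCOUNT */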
/* Tests for integers */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)
for (i=1;i<=LOOPCOUNT;i++) {
sum=sum+i;
}
if(known_sum!=sum) {
result++;
fprintf(stderr,"Error in sum with integers: Result was %d instead of %d\n",sum,known_sum);
}
diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)
for (i=1;i<=LOOPCOUNT;++i) {
diff=diff-i;
}
if(diff != 0) {
result++;
fprintf(stderr,"Error in difference with integers: Result was %d instead of 0.\n",diff);
}
/* Tests for doubles */
dsum=0;
dpt=1;
for (i=0;i<DOUBLE_DIGITS;++i) {
dpt*=dt;
}
dknown_sum = (1-dpt)/(1-dt);
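/* dknown_sum is the geometric series sum_{i=0}^{N-1} dt^i = (1 - dt^N)/(1 - dt),
with dt^N precomputed in dpt above */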
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)
for (i=0;i<DOUBLE_DIGITS;++i) {
dsum += pow(dt,i);
}
if( fabs(dsum-dknown_sum) > rounding_error ) {
result++;
fprintf(stderr,"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum);
}
dpt=1;
for (i=0;i<DOUBLE_DIGITS;++i) {
dpt*=dt;
}
fprintf(stderr,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)
for (i=0;i<DOUBLE_DIGITS;++i) {
ddiff -= pow(dt,i);
}
if( fabs(ddiff) > rounding_error) {
result++;
fprintf(stderr,"Error in Difference with doubles: Result was %E instead of 0.0\n",ddiff);
}
/* Tests for product of integers */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)
for(i=1;i<=MAX_FACTOR;i++) {
product *= i;
}
known_product = KNOWN_PRODUCT;
if(known_product != product) {
result++;
fprintf(stderr,"Error in Product with integers: Result was %d instead of %d\n\n",product,known_product);
}
/* Tests for logical and */
for(i=0;i<LOOPCOUNT;i++) {
logics[i]=1;
}
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
for(i=0;i<LOOPCOUNT;++i) {
logic_and = (logic_and && logics[i]);
}
if(!logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 1.\n");
}
logic_and = 1;
logics[LOOPCOUNT/2]=0;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&&:logic_and)
for(i=0;i<LOOPCOUNT;++i) {
logic_and = logic_and && logics[i];
}
if(logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 2.\n");
}
/* Tests for logical or */
for(i=0;i<LOOPCOUNT;i++) {
logics[i]=0;
}
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)
for(i=0;i<LOOPCOUNT;++i) {
logic_or = logic_or || logics[i];
}
if(logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 1.\n");
}
logic_or = 0;
logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(||:logic_or)
for(i=0;i<LOOPCOUNT;++i) {
logic_or = logic_or || logics[i];
}
if(!logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 2.\n");
}
/* Tests for bitwise and */
for(i=0;i<LOOPCOUNT;++i) {
logics[i]=1;
}
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)
for(i=0;i<LOOPCOUNT;++i) {
bit_and = (bit_and & logics[i]);
}
if(!bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 1.\n");
}
bit_and = 1;
logics[LOOPCOUNT/2]=0;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(&:bit_and)
for(i=0;i<LOOPCOUNT;++i) {
bit_and = bit_and & logics[i];
}
if(bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 2.\n");
}
for(i=0;i<LOOPCOUNT;i++) {
logics[i]=0;
}
/* Tests for bitwise or */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)
for(i=0;i<LOOPCOUNT;++i) {
bit_or = bit_or | logics[i];
}
if(bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(|:bit_or)
for(i=0;i<LOOPCOUNT;++i) {
bit_or = bit_or | logics[i];
}
if(!bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 2\n");
}
for(i=0;i<LOOPCOUNT;i++) {
logics[i]=0;
}
/* Tests for bitwise xor */
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
for(i=0;i<LOOPCOUNT;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
if(exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[LOOPCOUNT/2]=1;
#pragma omp parallel for schedule(dynamic,1) private(i) reduction(^:exclusiv_bit_or)
for(i=0;i<LOOPCOUNT;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
if(!exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_parallel_reduction()) {
num_failed++;
}
}
return num_failed;
}
|
qr_decompose.h | /**
* @file
* \brief Library functions to compute [QR
* decomposition](https://en.wikipedia.org/wiki/QR_decomposition) of a given
* matrix.
* \author [Krishna Vedala](https://github.com/kvedala)
*/
#ifndef NUMERICAL_METHODS_QR_DECOMPOSE_H_
#define NUMERICAL_METHODS_QR_DECOMPOSE_H_
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <limits>
#include <numeric>
#include <valarray>
#ifdef _OPENMP
#include <omp.h>
#endif
/** \namespace qr_algorithm
* \brief Functions to compute [QR
* decomposition](https://en.wikipedia.org/wiki/QR_decomposition) of any
* rectangular matrix
*/
namespace qr_algorithm
{
/**
* operator to print a matrix
*/
template <typename T>
std::ostream &operator<<(std::ostream &out,
std::valarray<std::valarray<T>> const &v)
{
const int width = 12;
const char separator = ' ';
out.precision(4);
for (size_t row = 0; row < v.size(); row++)
{
for (size_t col = 0; col < v[row].size(); col++)
out << std::right << std::setw(width) << std::setfill(separator)
<< v[row][col];
out << std::endl;
}
return out;
}
/**
* operator to print a vector
*/
template <typename T>
std::ostream &operator<<(std::ostream &out, std::valarray<T> const &v)
{
const int width = 10;
const char separator = ' ';
out.precision(4);
for (size_t row = 0; row < v.size(); row++)
{
out << std::right << std::setw(width) << std::setfill(separator)
<< v[row];
}
return out;
}
/**
* Compute dot product of two vectors of equal lengths
*
* If \f$\vec{a}=\left[a_0,a_1,a_2,...,a_L\right]\f$ and
* \f$\vec{b}=\left[b_0,b_1,b_1,...,b_L\right]\f$ then
* \f$\vec{a}\cdot\vec{b}=\displaystyle\sum_{i=0}^L a_i\times b_i\f$
*
* \returns \f$\vec{a}\cdot\vec{b}\f$
*/
template <typename T>
inline double vector_dot(const std::valarray<T> &a, const std::valarray<T> &b)
{
return (a * b).sum();
// could also use following
// return std::inner_product(std::begin(a), std::end(a), std::begin(b),
// 0.f);
}
/**
* Compute magnitude of vector.
*
* If \f$\vec{a}=\left[a_0,a_1,a_2,...,a_L\right]\f$ then
* \f$\left|\vec{a}\right|=\sqrt{\displaystyle\sum_{i=0}^L a_i^2}\f$
*
* \returns \f$\left|\vec{a}\right|\f$
*/
template <typename T>
inline double vector_mag(const std::valarray<T> &a)
{
double dot = vector_dot(a, a);
return std::sqrt(dot);
}
/**
* Compute projection of vector \f$\vec{a}\f$ on \f$\vec{b}\f$ defined as
* \f[\text{proj}_\vec{b}\vec{a}=\frac{\vec{a}\cdot\vec{b}}{\left|\vec{b}\right|^2}\vec{b}\f]
*
* \returns the projection vector; if \f$\left|\vec{b}\right|^2\f$ is near
* zero, returns \f$\vec{a}\f$ unchanged
*/
template <typename T>
std::valarray<T> vector_proj(const std::valarray<T> &a,
const std::valarray<T> &b)
{
double num = vector_dot(a, b);
double deno = vector_dot(b, b);
/*! check for division by zero using machine epsilon */
if (deno <= std::numeric_limits<double>::epsilon())
{
std::cerr << "[" << __func__ << "] Possible division by zero\n";
return a; // return vector a back
}
double scalar = num / deno;
return b * scalar;
}
/**
* Decompose matrix \f$A\f$ using [Gram-Schmidt
*process](https://en.wikipedia.org/wiki/QR_decomposition).
*
* \f{eqnarray*}{
* \text{given that}\quad A &=&
*\left[\mathbf{a}_0,\mathbf{a}_1,\ldots,\mathbf{a}_{N-1}\right]\\
* \text{where}\quad\mathbf{a}_i &=&
* \left[a_{0i},a_{1i},a_{2i},\ldots,a_{(M-1)i}\right]^T\quad\ldots\mbox{(column
* vectors)}\\
* \text{then}\quad\mathbf{u}_i &=& \mathbf{a}_i
*-\sum_{j=0}^{i-1}\text{proj}_{\mathbf{u}_j}\mathbf{a}_i\\
* \mathbf{e}_i &=&\frac{\mathbf{u}_i}{\left|\mathbf{u}_i\right|}\\
* Q &=& \begin{bmatrix}\mathbf{e}_0 & \mathbf{e}_1 & \mathbf{e}_2 & \dots &
* \mathbf{e}_{N-1}\end{bmatrix}\\
* R &=& \begin{bmatrix}\langle\mathbf{e}_0\,,\mathbf{a}_0\rangle &
* \langle\mathbf{e}_0\,,\mathbf{a}_1\rangle &
* \langle\mathbf{e}_0\,,\mathbf{a}_2\rangle & \dots \\
* 0 & \langle\mathbf{e}_1\,,\mathbf{a}_1\rangle &
* \langle\mathbf{e}_1\,,\mathbf{a}_2\rangle & \dots\\
* 0 & 0 & \langle\mathbf{e}_2\,,\mathbf{a}_2\rangle &
* \dots\\ \vdots & \vdots & \vdots & \ddots
* \end{bmatrix}\\
* \f}
*/
template <typename T>
void qr_decompose(
const std::valarray<std::valarray<T>> &A, /**< input matrix to decompose */
std::valarray<std::valarray<T>> *Q, /**< output decomposed matrix */
std::valarray<std::valarray<T>> *R /**< output decomposed matrix */
)
{
std::size_t ROWS = A.size(); // number of rows of A
std::size_t COLUMNS = A[0].size(); // number of columns of A
std::valarray<T> col_vector(ROWS);
std::valarray<T> col_vector2(ROWS);
std::valarray<T> tmp_vector(ROWS);
for (int i = 0; i < COLUMNS; i++)
{
/* for each column => R is a square matrix of NxN */
int j;
R[0][i] = 0.; /* make R upper triangular */
/* get corresponding Q vector */
#ifdef _OPENMP
// worksharing directive; runs in parallel only when called from an enclosing parallel region
#pragma omp for
#endif
for (j = 0; j < ROWS; j++)
{
tmp_vector[j] = A[j][i]; /* accumulator for uk */
col_vector[j] = A[j][i];
}
for (j = 0; j < i; j++)
{
for (int k = 0; k < ROWS; k++)
{
col_vector2[k] = Q[0][k][j];
}
col_vector2 = vector_proj(col_vector, col_vector2);
tmp_vector -= col_vector2;
}
double mag = vector_mag(tmp_vector);
#ifdef _OPENMP
// worksharing directive; runs in parallel only when called from an enclosing parallel region
#pragma omp for
#endif
for (j = 0; j < ROWS; j++) { Q[0][j][i] = tmp_vector[j] / mag; }
/* compute upper triangular values of R */
#ifdef _OPENMP
// worksharing directive; runs in parallel only when called from an enclosing parallel region
#pragma omp for
#endif
for (int kk = 0; kk < ROWS; kk++)
{
col_vector[kk] = Q[0][kk][i];
}
#ifdef _OPENMP
// worksharing directive; runs in parallel only when called from an enclosing parallel region
#pragma omp for
#endif
for (int k = i; k < COLUMNS; k++)
{
for (int kk = 0; kk < ROWS; kk++)
{
col_vector2[kk] = A[kk][k];
}
R[0][i][k] = (col_vector * col_vector2).sum();
}
}
}
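// Usage sketch (illustrative; A, Q, R are 2x2 here and must be pre-sized):
//
// std::valarray<std::valarray<double>> A(std::valarray<double>(2), 2);
// A[0] = {4., 3.};
// A[1] = {6., 3.};
// std::valarray<std::valarray<double>> Q(A), R(A);
// qr_decompose(A, &Q, &R);  // afterwards A ~= Q * R with Q orthonormal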
} // namespace qr_algorithm
#endif // NUMERICAL_METHODS_QR_DECOMPOSE_H_
|
GB_binop__pow_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__pow_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint64)
// C=scalar+B GB (_bind1st__pow_uint64)
// C=scalar+B' GB (_bind1st_tran__pow_uint64)
// C=A+scalar GB (_bind2nd__pow_uint64)
// C=A'+scalar GB (_bind2nd_tran__pow_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_pow_uint64 (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_pow_uint64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT64 || GxB_NO_POW_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pow_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pow_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__pow_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pow_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__pow_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = GB_pow_uint64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__pow_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = GB_pow_uint64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_uint64 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_uint64 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ompcompress.c | #ifdef _OPENMP
/* compress 1d contiguous array in parallel */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
size_t nx = field->nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
size_t blocks = (nx + 3) / 4;
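/* example: nx = 10 gives blocks = (10 + 3) / 4 = 3, i.e. two full
4-value blocks plus one partial block covering the remaining 2 values */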
size_t chunks = chunk_count_omp(stream, blocks, threads);
int chunk; /* OpenMP 2.0 requires int loop counter */
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
size_t bmin = chunk_offset(blocks, chunks, chunk + 0);
size_t bmax = chunk_offset(blocks, chunks, chunk + 1);
size_t block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
size_t x = 4 * block;
p += x;
/* compress partial or full block */
if (nx - x < 4u)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, nx - x, 1);
else
_t2(zfp_encode_block, Scalar, 1)(&s, p);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
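/* usage sketch (assumed caller, not in this file): these workers are
dispatched by zfp_compress() once OpenMP execution has been selected:

zfp_stream_set_execution(stream, zfp_exec_omp);
zfp_stream_set_omp_threads(stream, 8);  /* optional; 0 = OpenMP default */
zfp_compress(stream, field);
*/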
/* compress 1d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
size_t nx = field->nx;
ptrdiff_t sx = field->sx ? field->sx : 1;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
size_t blocks = (nx + 3) / 4;
size_t chunks = chunk_count_omp(stream, blocks, threads);
int chunk; /* OpenMP 2.0 requires int loop counter */
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
size_t bmin = chunk_offset(blocks, chunks, chunk + 0);
size_t bmax = chunk_offset(blocks, chunks, chunk + 1);
size_t block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
size_t x = 4 * block;
p += sx * (ptrdiff_t)x;
/* compress partial or full block */
if (nx - x < 4u)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, nx - x, sx);
else
_t2(zfp_encode_block_strided, Scalar, 1)(&s, p, sx);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 2d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
size_t nx = field->nx;
size_t ny = field->ny;
ptrdiff_t sx = field->sx ? field->sx : 1;
ptrdiff_t sy = field->sy ? field->sy : (ptrdiff_t)nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
size_t bx = (nx + 3) / 4;
size_t by = (ny + 3) / 4;
size_t blocks = bx * by;
size_t chunks = chunk_count_omp(stream, blocks, threads);
int chunk; /* OpenMP 2.0 requires int loop counter */
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
size_t bmin = chunk_offset(blocks, chunks, chunk + 0);
size_t bmax = chunk_offset(blocks, chunks, chunk + 1);
size_t block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y) within array */
const Scalar* p = data;
size_t b = block;
size_t x, y;
x = 4 * (b % bx); b /= bx;
y = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
/* compress partial or full block */
if (nx - x < 4u || ny - y < 4u)
_t2(zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
else
_t2(zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 3d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
size_t nx = field->nx;
size_t ny = field->ny;
size_t nz = field->nz;
ptrdiff_t sx = field->sx ? field->sx : 1;
ptrdiff_t sy = field->sy ? field->sy : (ptrdiff_t)nx;
ptrdiff_t sz = field->sz ? field->sz : (ptrdiff_t)(nx * ny);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
size_t bx = (nx + 3) / 4;
size_t by = (ny + 3) / 4;
size_t bz = (nz + 3) / 4;
size_t blocks = bx * by * bz;
size_t chunks = chunk_count_omp(stream, blocks, threads);
int chunk; /* OpenMP 2.0 requires int loop counter */
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
size_t bmin = chunk_offset(blocks, chunks, chunk + 0);
size_t bmax = chunk_offset(blocks, chunks, chunk + 1);
size_t block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z) within array */
const Scalar* p = data;
size_t b = block;
size_t x, y, z;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
/* compress partial or full block */
if (nx - x < 4u || ny - y < 4u || nz - z < 4u)
_t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
else
_t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 4d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
size_t nx = field->nx;
size_t ny = field->ny;
size_t nz = field->nz;
size_t nw = field->nw;
ptrdiff_t sx = field->sx ? field->sx : 1;
ptrdiff_t sy = field->sy ? field->sy : (ptrdiff_t)nx;
ptrdiff_t sz = field->sz ? field->sz : (ptrdiff_t)(nx * ny);
ptrdiff_t sw = field->sw ? field->sw : (ptrdiff_t)(nx * ny * nz);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
size_t bx = (nx + 3) / 4;
size_t by = (ny + 3) / 4;
size_t bz = (nz + 3) / 4;
size_t bw = (nw + 3) / 4;
size_t blocks = bx * by * bz * bw;
size_t chunks = chunk_count_omp(stream, blocks, threads);
int chunk; /* OpenMP 2.0 requires int loop counter */
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
size_t bmin = chunk_offset(blocks, chunks, chunk + 0);
size_t bmax = chunk_offset(blocks, chunks, chunk + 1);
size_t block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z, w) within array */
const Scalar* p = data;
size_t b = block;
size_t x, y, z, w;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * (b % bz); b /= bz;
w = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
/* compress partial or full block */
if (nx - x < 4u || ny - y < 4u || nz - z < 4u || nw - w < 4u)
_t2(zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
else
_t2(zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
#endif
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/opencl-private.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/threshold.h"
#ifdef MAGICKCORE_CLPERFMARKER
#include "CLPerfMarker.h"
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveBlurImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
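/*
A minimal usage sketch, assuming "image" came from ReadImage() and
"exception" from AcquireExceptionInfo(); a radius of 0.0 lets
AdaptiveBlurImage() pick the kernel width from sigma:

Image *blurred=AdaptiveBlurImage(image,0.0,1.5,exception);
*/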
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*blur_image;
blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
exception);
return(blur_image);
}
MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
**kernel,
normalize;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (fabs(sigma) <= MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]+=(1.0-normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p,
*restrict r;
register IndexPacket
*restrict blur_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
double
alpha,
gamma;
DoublePixelPacket
pixel;
register const double
*restrict k;
register ssize_t
i,
u,
v;
gamma=0.0;
i=(ssize_t) ceil((double) width*QuantumScale*
GetPixelIntensity(edge_image,r)-0.5);
if (i < 0)
i=0;
else
if (i > (ssize_t) width)
i=(ssize_t) width;
if ((i & 0x01) != 0)
i--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
(ssize_t) ((width-i)/2L),width-i,width-i,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
k=kernel[i];
for (v=0; v < (ssize_t) (width-i); v++)
{
for (u=0; u < (ssize_t) (width-i); u++)
{
alpha=1.0;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
if ((channel & RedChannel) != 0)
pixel.red+=(*k)*alpha*GetPixelRed(p);
if ((channel & GreenChannel) != 0)
pixel.green+=(*k)*alpha*GetPixelGreen(p);
if ((channel & BlueChannel) != 0)
pixel.blue+=(*k)*alpha*GetPixelBlue(p);
if ((channel & OpacityChannel) != 0)
pixel.opacity+=(*k)*GetPixelOpacity(p);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
gamma+=(*k)*alpha;
k++;
p++;
}
}
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
q++;
r++;
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AdaptiveBlurImageChannel)
#endif
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveSharpenImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
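/*
A minimal usage sketch, assuming "image" and "exception" already exist;
the channel variant restricts sharpening to the listed channels:

Image *sharpened=AdaptiveSharpenImageChannel(image,DefaultChannels,0.0,
1.0,exception);
*/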
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*sharp_image;
sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
exception);
return(sharp_image);
}
MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*sharp_view,
*edge_view,
*image_view;
double
**kernel,
normalize;
Image
*sharp_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
sharp_image=CloneImage(image,0,0,MagickTrue,exception);
if (sharp_image == (Image *) NULL)
return((Image *) NULL);
if (fabs(sigma) <= MagickEpsilon)
return(sharp_image);
if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
{
InheritException(exception,&sharp_image->exception);
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, sharpen, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively sharpen image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,sharp_image,sharp_image->rows,1)
#endif
for (y=0; y < (ssize_t) sharp_image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p,
*restrict r;
register IndexPacket
*restrict sharp_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
exception);
if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
for (x=0; x < (ssize_t) sharp_image->columns; x++)
{
double
alpha,
gamma;
DoublePixelPacket
pixel;
register const double
*restrict k;
register ssize_t
i,
u,
v;
gamma=0.0;
i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (i < 0)
i=0;
else
if (i > (ssize_t) width)
i=(ssize_t) width;
if ((i & 0x01) != 0)
i--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
(ssize_t) ((width-i)/2L),width-i,width-i,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
k=kernel[i];
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
for (v=0; v < (ssize_t) (width-i); v++)
{
for (u=0; u < (ssize_t) (width-i); u++)
{
alpha=1.0;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
if ((channel & RedChannel) != 0)
pixel.red+=(*k)*alpha*GetPixelRed(p);
if ((channel & GreenChannel) != 0)
pixel.green+=(*k)*alpha*GetPixelGreen(p);
if ((channel & BlueChannel) != 0)
pixel.blue+=(*k)*alpha*GetPixelBlue(p);
if ((channel & OpacityChannel) != 0)
pixel.opacity+=(*k)*GetPixelOpacity(p);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
gamma+=(*k)*alpha;
k++;
p++;
}
}
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index));
q++;
r++;
}
if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AdaptiveSharpenImageChannel)
#endif
proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sharp_image->type=image->type;
sharp_view=DestroyCacheView(sharp_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
sharp_image=DestroyImage(sharp_image);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *BlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
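/*
A minimal usage sketch, assuming "image" and "exception" already exist;
sigma 2.0 with radius 0.0 auto-selects the kernel radius:

Image *blurred=BlurImage(image,0.0,2.0,exception);
*/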
MagickExport Image *BlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*blur_image;
blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception);
return(blur_image);
}
MagickExport Image *BlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
char
geometry[MaxTextExtent];
KernelInfo
*kernel_info;
Image
*blur_image = NULL;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception);
if (blur_image != (Image *) NULL)
return(blur_image);
(void) FormatLocaleString(geometry,MaxTextExtent,
"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel_info=AcquireKernelInfo(geometry);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
blur_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info,
UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const size_t order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const size_t order,const double *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o order: the number of columns and rows in the filter kernel.
%
% o kernel: An array of double representing the convolution kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
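/*
A minimal usage sketch, assuming "image" and "exception" already exist;
the 3x3 kernel below (order 3) is an arbitrary sharpening example:

static const double kernel[9] =
{
  -1.0, -1.0, -1.0,
  -1.0,  9.0, -1.0,
  -1.0, -1.0, -1.0
};
Image *sharpened=ConvolveImage(image,3,kernel,exception);
*/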
MagickExport Image *ConvolveImage(const Image *image,const size_t order,
const double *kernel,ExceptionInfo *exception)
{
Image
*convolve_image;
#ifdef MAGICKCORE_CLPERFMARKER
clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel,
exception);
#ifdef MAGICKCORE_CLPERFMARKER
clEndPerfMarkerAMD();
#endif
return(convolve_image);
}
MagickExport Image *ConvolveImageChannel(const Image *image,
const ChannelType channel,const size_t order,const double *kernel,
ExceptionInfo *exception)
{
Image
*convolve_image;
KernelInfo
*kernel_info;
register ssize_t
i;
kernel_info=AcquireKernelInfo((const char *) NULL);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
kernel_info->width=order;
kernel_info->height=order;
kernel_info->x=(ssize_t) (order-1)/2;
kernel_info->y=(ssize_t) (order-1)/2;
kernel_info->signature=MagickSignature;
kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
if (kernel_info->values == (double *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) (order*order); i++)
kernel_info->values[i]=kernel[i];
convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
exception);
if (convolve_image == (Image *) NULL)
convolve_image=MorphologyApply(image,channel,ConvolveMorphology,1,
kernel_info,UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
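/*
A minimal usage sketch, assuming "image" and "exception" already exist;
DespeckleImage() takes no tuning parameters:

Image *clean=DespeckleImage(image,exception);
*/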
static void Hull(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,const size_t columns,const size_t rows,
const int polarity,Quantum *restrict f,Quantum *restrict g)
{
register Quantum
*p,
*q,
*r,
*s;
ssize_t
y;
assert(f != (Quantum *) NULL);
assert(g != (Quantum *) NULL);
p=f+(columns+2);
q=g+(columns+2);
r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
register ssize_t
i,
x;
SignedQuantum
v;
i=(2*y+1)+y*columns;
if (polarity > 0)
for (x=0; x < (ssize_t) columns; x++)
{
v=(SignedQuantum) p[i];
if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2)))
v+=ScaleCharToQuantum(1);
q[i]=(Quantum) v;
i++;
}
else
for (x=0; x < (ssize_t) columns; x++)
{
v=(SignedQuantum) p[i];
if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2)))
v-=ScaleCharToQuantum(1);
q[i]=(Quantum) v;
i++;
}
}
p=f+(columns+2);
q=g+(columns+2);
r=q+(y_offset*(columns+2)+x_offset);
s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
register ssize_t
i,
x;
SignedQuantum
v;
i=(2*y+1)+y*columns;
if (polarity > 0)
for (x=0; x < (ssize_t) columns; x++)
{
v=(SignedQuantum) q[i];
if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) &&
((SignedQuantum) r[i] > v))
v+=ScaleCharToQuantum(1);
p[i]=(Quantum) v;
i++;
}
else
for (x=0; x < (ssize_t) columns; x++)
{
v=(SignedQuantum) q[i];
if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) &&
((SignedQuantum) r[i] < v))
v-=ScaleCharToQuantum(1);
p[i]=(Quantum) v;
i++;
}
}
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"
CacheView
*despeckle_view,
*image_view;
Image
*despeckle_image;
MagickBooleanType
status;
MemoryInfo
*buffer_info,
*pixel_info;
register ssize_t
i;
Quantum
*restrict buffer,
*restrict pixels;
size_t
length,
number_channels;
static const ssize_t
X[4] = {0, 1, 1,-1},
Y[4] = {1, 0, 1, 1};
/*
Allocate despeckled image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
despeckle_image=AccelerateDespeckleImage(image, exception);
if (despeckle_image != (Image *) NULL)
return(despeckle_image);
despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (despeckle_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
{
InheritException(exception,&despeckle_image->exception);
despeckle_image=DestroyImage(despeckle_image);
return((Image *) NULL);
}
/*
Allocate image buffer.
*/
length=(size_t) ((image->columns+2)*(image->rows+2));
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
if ((pixel_info == (MemoryInfo *) NULL) ||
(buffer_info == (MemoryInfo *) NULL))
{
if (buffer_info != (MemoryInfo *) NULL)
buffer_info=RelinquishVirtualMemory(buffer_info);
if (pixel_info != (MemoryInfo *) NULL)
pixel_info=RelinquishVirtualMemory(pixel_info);
despeckle_image=DestroyImage(despeckle_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
/*
Reduce speckle in the image.
*/
status=MagickTrue;
number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
image_view=AcquireVirtualCacheView(image,exception);
despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
for (i=0; i < (ssize_t) number_channels; i++)
{
register ssize_t
k,
x;
ssize_t
j,
y;
if (status == MagickFalse)
continue;
if ((image->matte == MagickFalse) && (i == 3))
continue;
(void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
j=(ssize_t) image->columns+2;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
j++;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (i)
{
case 0: pixels[j]=GetPixelRed(p); break;
case 1: pixels[j]=GetPixelGreen(p); break;
case 2: pixels[j]=GetPixelBlue(p); break;
case 3: pixels[j]=GetPixelOpacity(p); break;
case 4: pixels[j]=GetPixelBlack(indexes+x); break;
default: break;
}
p++;
j++;
}
j++;
}
(void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
for (k=0; k < 4; k++)
{
Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
}
j=(ssize_t) image->columns+2;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
j++;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (i)
{
case 0: SetPixelRed(q,pixels[j]); break;
case 1: SetPixelGreen(q,pixels[j]); break;
case 2: SetPixelBlue(q,pixels[j]); break;
case 3: SetPixelOpacity(q,pixels[j]); break;
case 4: SetPixelIndex(indexes+x,pixels[j]); break;
default: break;
}
q++;
j++;
}
sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
if (sync == MagickFalse)
{
status=MagickFalse;
break;
}
j++;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
number_channels);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
despeckle_view=DestroyCacheView(despeckle_view);
image_view=DestroyCacheView(image_view);
buffer_info=RelinquishVirtualMemory(buffer_info);
pixel_info=RelinquishVirtualMemory(pixel_info);
despeckle_image->type=image->type;
if (status == MagickFalse)
despeckle_image=DestroyImage(despeckle_image);
return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
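/*
A minimal usage sketch, assuming "image" and "exception" already exist;
a radius of 0.0 lets EdgeImage() choose a suitable kernel width:

Image *edges=EdgeImage(image,0.0,exception);
*/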
MagickExport Image *EdgeImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
Image
*edge_image;
KernelInfo
*kernel_info;
register ssize_t
i;
size_t
width;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=GetOptimalKernelWidth1D(radius,0.5);
kernel_info=AcquireKernelInfo((const char *) NULL);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
kernel_info->signature=MagickSignature;
kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
if (kernel_info->values == (double *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]=(-1.0);
kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
exception);
if (edge_image == (Image *) NULL)
edge_image=MorphologyApply(image,DefaultChannels,ConvolveMorphology,1,
kernel_info,UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and EmbossImage() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
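/*
A minimal usage sketch, assuming "image" and "exception" already exist;
radius 0.0 with sigma 1.0 lets EmbossImage() size the kernel:

Image *embossed=EmbossImage(image,0.0,1.0,exception);
*/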
MagickExport Image *EmbossImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*emboss_image;
KernelInfo
*kernel_info;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
if (kernel_info->values == (double *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
j=(ssize_t) (kernel_info->width-1)/2;
k=j;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 :
8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
(2.0*MagickPI*MagickSigma*MagickSigma));
if (u != k)
kernel_info->values[i]=0.0;
i++;
}
k--;
}
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,
kernel_info,exception);
if (emboss_image == (Image *) NULL)
emboss_image=MorphologyApply(image,DefaultChannels,ConvolveMorphology,1,
kernel_info,UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
if (emboss_image != (Image *) NULL)
(void) EqualizeImageChannel(emboss_image,(ChannelType)
(AllChannels &~ SyncChannels));
return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FilterImage() applies a custom convolution kernel to the image.
%
% The format of the FilterImage method is:
%
% Image *FilterImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
% Image *FilterImageChannel(const Image *image,const ChannelType channel,
% const KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
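/*
A minimal usage sketch, assuming "image" and "exception" already exist;
AcquireKernelInfo() accepts a named kernel string ("Sobel" is one
example, and its width is odd as FilterImage() requires):

KernelInfo *kernel=AcquireKernelInfo("Sobel");
if (kernel != (KernelInfo *) NULL)
  {
    Image *filtered=FilterImageChannel(image,DefaultChannels,kernel,
      exception);
    kernel=DestroyKernelInfo(kernel);
  }
*/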
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
ExceptionInfo *exception)
{
Image
*filter_image;
filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception);
return(filter_image);
}
MagickExport Image *FilterImageChannel(const Image *image,
const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag "Filter/Image"
CacheView
*filter_view,
*image_view;
Image
*filter_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
*filter_kernel;
register ssize_t
i;
ssize_t
y;
#ifdef MAGICKCORE_CLPERFMARKER
clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
/*
Initialize filter image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if ((kernel->width % 2) == 0)
ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
*message;
register const double
*k;
ssize_t
u,
v;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double)
kernel->height);
message=AcquireString("");
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
*message='\0';
(void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < (ssize_t) kernel->width; u++)
{
(void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception);
if (filter_image != (Image *) NULL)
{
#ifdef MAGICKCORE_CLPERFMARKER
clEndPerfMarkerAMD();
#endif
return(filter_image);
}
filter_image=CloneImage(image,0,0,MagickTrue,exception);
if (filter_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
{
InheritException(exception,&filter_image->exception);
filter_image=DestroyImage(filter_image);
return((Image *) NULL);
}
/*
Normalize kernel.
*/
filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->width*sizeof(*filter_kernel)));
if (filter_kernel == (MagickRealType *) NULL)
{
filter_image=DestroyImage(filter_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=0; i < (ssize_t) (kernel->width*kernel->width); i++)
filter_kernel[i]=(MagickRealType) kernel->values[i];
/*
Filter image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
filter_view=AcquireAuthenticCacheView(filter_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,filter_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict filter_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y-
(ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width,
kernel->height,exception);
q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
pixel;
register const MagickRealType
*restrict k;
register const PixelPacket
*restrict kernel_pixels;
register ssize_t
u;
ssize_t
v;
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
k=filter_kernel;
kernel_pixels=p;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (v=0; v < (ssize_t) kernel->width; v++)
{
for (u=0; u < (ssize_t) kernel->height; u++)
{
pixel.red+=(*k)*kernel_pixels[u].red;
pixel.green+=(*k)*kernel_pixels[u].green;
pixel.blue+=(*k)*kernel_pixels[u].blue;
k++;
}
kernel_pixels+=image->columns+kernel->width;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(pixel.blue));
if ((channel & OpacityChannel) != 0)
{
k=filter_kernel;
kernel_pixels=p;
for (v=0; v < (ssize_t) kernel->width; v++)
{
for (u=0; u < (ssize_t) kernel->height; u++)
{
pixel.opacity+=(*k)*kernel_pixels[u].opacity;
k++;
}
kernel_pixels+=image->columns+kernel->width;
}
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
register const IndexPacket
*restrict kernel_indexes;
k=filter_kernel;
kernel_indexes=indexes;
for (v=0; v < (ssize_t) kernel->width; v++)
{
for (u=0; u < (ssize_t) kernel->height; u++)
{
pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u);
k++;
}
kernel_indexes+=image->columns+kernel->width;
}
SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index));
}
}
else
{
double
alpha,
gamma;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->width; v++)
{
for (u=0; u < (ssize_t) kernel->height; u++)
{
alpha=(MagickRealType) (QuantumScale*(QuantumRange-
GetPixelOpacity(kernel_pixels+u)));
pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u);
pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u);
pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u);
gamma+=(*k)*alpha;
k++;
}
kernel_pixels+=image->columns+kernel->width;
}
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
if ((channel & OpacityChannel) != 0)
{
k=filter_kernel;
kernel_pixels=p;
for (v=0; v < (ssize_t) kernel->width; v++)
{
for (u=0; u < (ssize_t) kernel->height; u++)
{
pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u);
k++;
}
kernel_pixels+=image->columns+kernel->width;
}
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
register const IndexPacket
*restrict kernel_indexes;
k=filter_kernel;
kernel_pixels=p;
kernel_indexes=indexes;
for (v=0; v < (ssize_t) kernel->width; v++)
{
for (u=0; u < (ssize_t) kernel->height; u++)
{
alpha=(MagickRealType) (QuantumScale*(QuantumRange-
kernel_pixels[u].opacity));
pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u);
k++;
}
kernel_pixels+=image->columns+kernel->width;
kernel_indexes+=image->columns+kernel->width;
}
SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
indexes++;
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(filter_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FilterImageChannel)
#endif
proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
filter_image->type=image->type;
filter_view=DestroyCacheView(filter_view);
image_view=DestroyCacheView(image_view);
filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel);
if (status == MagickFalse)
filter_image=DestroyImage(filter_image);
#ifdef MAGICKCORE_CLPERFMARKER
clEndPerfMarkerAMD();
#endif
return(filter_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *GaussianBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
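/*
A minimal usage sketch, assuming "image" and "exception" already exist;
the channel variant restricts the blur to the red channel:

Image *blurred=GaussianBlurImageChannel(image,RedChannel,0.0,1.5,
exception);
*/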
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*blur_image;
blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma,
exception);
return(blur_image);
}
MagickExport Image *GaussianBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
char
geometry[MaxTextExtent];
KernelInfo
*kernel_info;
Image
*blur_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
(void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g",
radius,sigma);
kernel_info=AcquireKernelInfo(geometry);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
exception);
if (blur_image == (Image *) NULL)
blur_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info,
UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
% Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
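%  A hedged usage sketch (illustrative values; a radius of 0 lets the
%  method pick a suitable radius for the given sigma):
%
%    Image *blur=MotionBlurImage(image,0.0,8.0,45.0,exception);
%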
*/
static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
double
*kernel,
normalize;
register ssize_t
i;
/*
Generate a 1-D convolution kernel.
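Each tap follows a half-Gaussian, kernel[i]=exp(-i*i/(2*sigma*sigma))/
(sqrt(2*pi)*sigma), and the taps are then normalized to sum to 1.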
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double *) NULL)
return(kernel);
normalize=0.0;
for (i=0; i < (ssize_t) width; i++)
{
kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
MagickSigma)))/(MagickSQ2PI*MagickSigma));
normalize+=kernel[i];
}
for (i=0; i < (ssize_t) width; i++)
kernel[i]/=normalize;
return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
Image
*motion_blur;
motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
exception);
return(motion_blur);
}
MagickExport Image *MotionBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"
CacheView
*blur_view,
*image_view;
double
*kernel;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
OffsetInfo
*offset;
PointInfo
point;
register ssize_t
i;
size_t
width;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=GetMotionBlurKernel(width,sigma);
if (kernel == (double *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
if (offset == (OffsetInfo *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
point.x=(double) width*sin(DegreesToRadians(angle));
point.y=(double) width*cos(DegreesToRadians(angle));
for (i=0; i < (ssize_t) width; i++)
{
offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
}
/*
Motion blur image.
*/
blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset,
exception);
if (blur_image != (Image *) NULL)
return(blur_image);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict blur_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickPixelPacket
qixel;
PixelPacket
pixel;
register const IndexPacket
*restrict indexes;
register double
*restrict k;
register ssize_t
i;
k=kernel;
qixel=bias;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (i=0; i < (ssize_t) width; i++)
{
(void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
offset[i].y,&pixel,exception);
qixel.red+=(*k)*pixel.red;
qixel.green+=(*k)*pixel.green;
qixel.blue+=(*k)*pixel.blue;
qixel.opacity+=(*k)*pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=(*k)*(*indexes);
}
k++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index));
}
else
{
double
alpha,
gamma;
alpha=0.0;
gamma=0.0;
for (i=0; i < (ssize_t) width; i++)
{
(void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
offset[i].y,&pixel,exception);
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
qixel.red+=(*k)*alpha*pixel.red;
qixel.green+=(*k)*alpha*pixel.green;
qixel.blue+=(*k)*alpha*pixel.blue;
qixel.opacity+=(*k)*pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=(*k)*alpha*GetPixelIndex(indexes);
}
gamma+=(*k)*alpha;
k++;
}
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
}
q++;
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MotionBlurImageChannel)
#endif
proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
image_view=DestroyCacheView(image_view);
kernel=(double *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *KuwaharaImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
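%  A hedged usage sketch (illustrative values):
%
%    Image *smooth=KuwaharaImage(image,2.0,1.5,exception);
%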
*/
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*kuwahara_image;
kuwahara_image=KuwaharaImageChannel(image,DefaultChannels,radius,sigma,
exception);
return(kuwahara_image);
}
MagickExport Image *KuwaharaImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"
CacheView
*image_view,
*kuwahara_view;
Image
*gaussian_image,
*kuwahara_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
width;
ssize_t
y;
/*
Initialize Kuwahara image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
(void) channel;
width=(size_t) radius+1;
gaussian_image=BlurImage(image,radius,sigma,exception);
if (gaussian_image == (Image *) NULL)
return((Image *) NULL);
kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (kuwahara_image == (Image *) NULL)
{
gaussian_image=DestroyImage(gaussian_image);
return((Image *) NULL);
}
if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse)
{
InheritException(exception,&kuwahara_image->exception);
gaussian_image=DestroyImage(gaussian_image);
kuwahara_image=DestroyImage(kuwahara_image);
return((Image *) NULL);
}
/*
Edge preserving noise reduction filter.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(gaussian_image,exception);
kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,kuwahara_image,kuwahara_image->rows,1)
#endif
for (y=0; y < (ssize_t) kuwahara_image->rows; y++)
{
register IndexPacket
*restrict kuwahara_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view);
for (x=0; x < (ssize_t) kuwahara_image->columns; x++)
{
double
min_variance;
MagickPixelPacket
pixel;
RectangleInfo
quadrant,
target;
register ssize_t
i;
min_variance=MagickMaximumValue;
SetGeometry(gaussian_image,&target);
quadrant.width=width;
quadrant.height=width;
for (i=0; i < 4; i++)
{
const PixelPacket
*restrict p;
double
variance;
MagickPixelPacket
mean;
register const PixelPacket
*restrict k;
register ssize_t
n;
quadrant.x=x;
quadrant.y=y;
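/*
Examine the four (width x width) windows that touch (x,y): i=0 is the
window to the upper-left (NW), i=1 upper-right (NE), i=2 lower-left
(SW), and the default case lower-right (SE).
*/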
switch (i)
{
case 0:
{
quadrant.x=x-(ssize_t) (width-1);
quadrant.y=y-(ssize_t) (width-1);
break;
}
case 1:
{
quadrant.y=y-(ssize_t) (width-1);
break;
}
case 2:
{
quadrant.x=x-(ssize_t) (width-1);
break;
}
default:
break;
}
p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
quadrant.width,quadrant.height,exception);
if (p == (const PixelPacket *) NULL)
break;
GetMagickPixelPacket(image,&mean);
k=p;
for (n=0; n < (ssize_t) (width*width); n++)
{
mean.red+=(double) k->red;
mean.green+=(double) k->green;
mean.blue+=(double) k->blue;
k++;
}
mean.red/=(double) (width*width);
mean.green/=(double) (width*width);
mean.blue/=(double) (width*width);
k=p;
variance=0.0;
for (n=0; n < (ssize_t) (width*width); n++)
{
double
luma;
luma=GetPixelLuma(image,k);
variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean));
k++;
}
if (variance < min_variance)
{
min_variance=variance;
target=quadrant;
}
}
if (i < 4)
{
status=MagickFalse;
break;
}
(void) InterpolateMagickPixelPacket(gaussian_image,image_view,
UndefinedInterpolatePixel,(double) target.x+target.width/2.0,
(double) target.y+target.height/2.0,&pixel,exception);
SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_KuwaharaImage)
#endif
proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
kuwahara_view=DestroyCacheView(kuwahara_view);
image_view=DestroyCacheView(image_view);
gaussian_image=DestroyImage(gaussian_image);
if (status == MagickFalse)
kuwahara_image=DestroyImage(kuwahara_image);
return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImage method is:
%
% Image *PreviewImage(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
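%  A hedged usage sketch (GammaPreview chosen purely for illustration):
%
%    Image *montage=PreviewImage(image,GammaPreview,exception);
%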
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"
char
factor[MaxTextExtent],
label[MaxTextExtent];
double
degrees,
gamma,
percentage,
radius,
sigma,
threshold;
Image
*images,
*montage_image,
*preview_image,
*thumbnail;
ImageInfo
*preview_info;
MagickBooleanType
proceed;
MontageInfo
*montage_info;
QuantizeInfo
quantize_info;
RectangleInfo
geometry;
register ssize_t
i,
x;
size_t
colors;
ssize_t
y;
/*
Open output image file.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
colors=2;
degrees=0.0;
gamma=(-0.2f);
preview_info=AcquireImageInfo();
SetGeometry(image,&geometry);
(void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
images=NewImageList();
percentage=12.5;
GetQuantizeInfo(&quantize_info);
radius=0.0;
sigma=1.0;
threshold=0.0;
x=0;
y=0;
for (i=0; i < NumberTiles; i++)
{
thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
if (thumbnail == (Image *) NULL)
break;
(void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
(void *) NULL);
(void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
if (i == (NumberTiles/2))
{
(void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
AppendImageToList(&images,thumbnail);
continue;
}
switch (preview)
{
case RotatePreview:
{
degrees+=45.0;
preview_image=RotateImage(thumbnail,degrees,exception);
(void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees);
break;
}
case ShearPreview:
{
degrees+=5.0;
preview_image=ShearImage(thumbnail,degrees,degrees,exception);
(void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g",
degrees,2.0*degrees);
break;
}
case RollPreview:
{
x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
preview_image=RollImage(thumbnail,x,y,exception);
(void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g",
(double) x,(double) y);
break;
}
case HuePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g",
2.0*percentage);
(void) ModulateImage(preview_image,factor);
(void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
break;
}
case SaturationPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage);
(void) ModulateImage(preview_image,factor);
(void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
break;
}
case BrightnessPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage);
(void) ModulateImage(preview_image,factor);
(void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
break;
}
case GammaPreview:
default:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
gamma+=0.4f;
(void) GammaImageChannel(preview_image,DefaultChannels,gamma);
(void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma);
break;
}
case SpiffPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image != (Image *) NULL)
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickTrue);
(void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)",
(double) i+1);
break;
}
case DullPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickFalse);
(void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)",
(double) i+1);
break;
}
case GrayscalePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
quantize_info.colorspace=GRAYColorspace;
(void) QuantizeImage(&quantize_info,preview_image);
(void) FormatLocaleString(label,MaxTextExtent,
"-colorspace gray -colors %.20g",(double) colors);
break;
}
case QuantizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
(void) QuantizeImage(&quantize_info,preview_image);
(void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double)
colors);
break;
}
case DespecklePreview:
{
for (x=0; x < (i-1); x++)
{
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
thumbnail=DestroyImage(thumbnail);
thumbnail=preview_image;
}
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)",
(double) i+1);
break;
}
case ReduceNoisePreview:
{
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
(size_t) radius,exception);
(void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius);
break;
}
case AddNoisePreview:
{
switch ((int) i)
{
case 0:
{
(void) CopyMagickString(factor,"uniform",MaxTextExtent);
break;
}
case 1:
{
(void) CopyMagickString(factor,"gaussian",MaxTextExtent);
break;
}
case 2:
{
(void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
break;
}
case 3:
{
(void) CopyMagickString(factor,"impulse",MaxTextExtent);
break;
}
case 5:
{
(void) CopyMagickString(factor,"laplacian",MaxTextExtent);
break;
}
case 6:
{
(void) CopyMagickString(factor,"poisson",MaxTextExtent);
break;
}
default:
{
(void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
break;
}
}
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
(size_t) i,exception);
(void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor);
break;
}
case SharpenPreview:
{
preview_image=SharpenImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g",
radius,sigma);
break;
}
case BlurPreview:
{
preview_image=BlurImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius,
sigma);
break;
}
case ThresholdPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) BilevelImage(preview_image,
(double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
(void) FormatLocaleString(label,MaxTextExtent,"threshold %g",
(double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
break;
}
case EdgeDetectPreview:
{
preview_image=EdgeImage(thumbnail,radius,exception);
(void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius);
break;
}
case SpreadPreview:
{
preview_image=SpreadImage(thumbnail,radius,exception);
(void) FormatLocaleString(label,MaxTextExtent,"spread %g",
radius+0.5);
break;
}
case SolarizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) SolarizeImage(preview_image,(double) QuantumRange*
percentage/100.0);
(void) FormatLocaleString(label,MaxTextExtent,"solarize %g",
(QuantumRange*percentage)/100.0);
break;
}
case ShadePreview:
{
degrees+=10.0;
preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
exception);
(void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g",
degrees,degrees);
break;
}
case RaisePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
geometry.width=(size_t) (2*i+2);
geometry.height=(size_t) (2*i+2);
geometry.x=(i-1)/2;
geometry.y=(i-1)/2;
(void) RaiseImage(preview_image,&geometry,MagickTrue);
(void) FormatLocaleString(label,MaxTextExtent,
"raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
break;
}
case SegmentPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
threshold+=0.4f;
(void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
threshold);
(void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g",
threshold,threshold);
break;
}
case SwirlPreview:
{
preview_image=SwirlImage(thumbnail,degrees,exception);
(void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees);
degrees+=45.0;
break;
}
case ImplodePreview:
{
degrees+=0.1f;
preview_image=ImplodeImage(thumbnail,degrees,exception);
(void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees);
break;
}
case WavePreview:
{
degrees+=5.0f;
preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
(void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g",
0.5*degrees,2.0*degrees);
break;
}
case OilPaintPreview:
{
preview_image=OilPaintImage(thumbnail,(double) radius,exception);
(void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius);
break;
}
case CharcoalDrawingPreview:
{
preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case JPEGPreview:
{
char
filename[MaxTextExtent];
int
file;
MagickBooleanType
status;
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
preview_info->quality=(size_t) percentage;
(void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double)
preview_info->quality);
file=AcquireUniqueFileResource(filename);
if (file != -1)
file=close(file)-1;
(void) FormatLocaleString(preview_image->filename,MaxTextExtent,
"jpeg:%s",filename);
status=WriteImage(preview_info,preview_image);
if (status != MagickFalse)
{
Image
*quality_image;
(void) CopyMagickString(preview_info->filename,
preview_image->filename,MaxTextExtent);
quality_image=ReadImage(preview_info,exception);
if (quality_image != (Image *) NULL)
{
preview_image=DestroyImage(preview_image);
preview_image=quality_image;
}
}
(void) RelinquishUniqueFileResource(preview_image->filename);
if ((GetBlobSize(preview_image)/1024) >= 1024)
(void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ",
factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
1024.0/1024.0);
else
if (GetBlobSize(preview_image) >= 1024)
(void) FormatLocaleString(label,MaxTextExtent,
"quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
GetBlobSize(preview_image))/1024.0);
else
(void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ",
factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail)));
break;
}
}
thumbnail=DestroyImage(thumbnail);
percentage+=12.5;
radius+=0.5;
sigma+=0.25;
if (preview_image == (Image *) NULL)
break;
(void) DeleteImageProperty(preview_image,"label");
(void) SetImageProperty(preview_image,"label",label);
AppendImageToList(&images,preview_image);
proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
NumberTiles);
if (proceed == MagickFalse)
break;
}
if (images == (Image *) NULL)
{
preview_info=DestroyImageInfo(preview_info);
return((Image *) NULL);
}
/*
Create the montage.
*/
montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
(void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
montage_info->shadow=MagickTrue;
(void) CloneString(&montage_info->tile,"3x3");
(void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
(void) CloneString(&montage_info->frame,DefaultTileFrame);
montage_image=MontageImages(images,montage_info,exception);
montage_info=DestroyMontageInfo(montage_info);
images=DestroyImageList(images);
if (montage_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
if (montage_image->montage != (char *) NULL)
{
/*
Free image directory.
*/
montage_image->montage=(char *) RelinquishMagickMemory(
montage_image->montage);
if (image->directory != (char *) NULL)
montage_image->directory=(char *) RelinquishMagickMemory(
montage_image->directory);
}
preview_info=DestroyImageInfo(preview_info);
return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a rotational blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RotationalBlurImageChannel(const Image *image,
% const ChannelType channel,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the rotational blur.
%
% o exception: return any errors or warnings in this structure.
%
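%  A hedged usage sketch (a 10 degree blur arc, for illustration):
%
%    Image *blur=RotationalBlurImage(image,10.0,exception);
%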
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
ExceptionInfo *exception)
{
Image
*blur_image;
blur_image=RotationalBlurImageChannel(image,DefaultChannels,angle,exception);
return(blur_image);
}
MagickExport Image *RotationalBlurImageChannel(const Image *image,
const ChannelType channel,const double angle,ExceptionInfo *exception)
{
CacheView
*blur_view,
*image_view;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
blur_radius,
*cos_theta,
offset,
*sin_theta,
theta;
PointInfo
blur_center;
register ssize_t
i;
size_t
n;
ssize_t
y;
/*
Allocate blur image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
blur_image=AccelerateRadialBlurImage(image,channel,angle,exception);
if (blur_image != (Image *) NULL)
return(blur_image);
blur_center.x=(double) (image->columns-1)/2.0;
blur_center.y=(double) (image->rows-1)/2.0;
blur_radius=hypot(blur_center.x,blur_center.y);
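/*
The sample count n grows with the rotation angle and with the square
root of the blur radius; theta is the angular step between samples.
*/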
n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
sizeof(*cos_theta));
sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
sizeof(*sin_theta));
if ((cos_theta == (MagickRealType *) NULL) ||
(sin_theta == (MagickRealType *) NULL))
{
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
offset=theta*(MagickRealType) (n-1)/2.0;
for (i=0; i < (ssize_t) n; i++)
{
cos_theta[i]=cos((double) (theta*i-offset));
sin_theta[i]=sin((double) (theta*i-offset));
}
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Radial blur image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register IndexPacket
*restrict blur_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
MagickPixelPacket
qixel;
MagickRealType
normalize,
radius;
PixelPacket
pixel;
PointInfo
center;
register ssize_t
i;
size_t
step;
center.x=(double) x-blur_center.x;
center.y=(double) y-blur_center.y;
radius=hypot((double) center.x,center.y);
if (radius == 0)
step=1;
else
{
step=(size_t) (blur_radius/radius);
if (step == 0)
step=1;
else
if (step >= n)
step=n-1;
}
normalize=0.0;
qixel=bias;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
{
(void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
(blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
(ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
cos_theta[i]+0.5),&pixel,exception);
qixel.red+=pixel.red;
qixel.green+=pixel.green;
qixel.blue+=pixel.blue;
qixel.opacity+=pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=(*indexes);
}
normalize+=1.0;
}
normalize=PerceptibleReciprocal(normalize);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(normalize*qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(normalize*qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index));
}
else
{
double
alpha,
gamma;
alpha=1.0;
gamma=0.0;
for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
{
(void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
(blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
(ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
cos_theta[i]+0.5),&pixel,exception);
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
qixel.red+=alpha*pixel.red;
qixel.green+=alpha*pixel.green;
qixel.blue+=alpha*pixel.blue;
qixel.opacity+=pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=alpha*(*indexes);
}
gamma+=alpha;
normalize+=1.0;
}
gamma=PerceptibleReciprocal(gamma);
normalize=PerceptibleReciprocal(normalize);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
}
q++;
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RotationalBlurImageChannel)
#endif
proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
image_view=DestroyCacheView(image_view);
cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blurs pixels within a contrast threshold.
% It is similar to the unsharp mask, which sharpens everything with a
% contrast above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
% Image *SelectiveBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
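%  A hedged usage sketch (the threshold is compared against pixel
%  intensity differences, so it is given on the quantum scale here):
%
%    Image *blur=SelectiveBlurImage(image,0.0,1.0,0.1*QuantumRange,
%      exception);
%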
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
const double sigma,const double threshold,ExceptionInfo *exception)
{
Image
*blur_image;
blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
threshold,exception);
return(blur_image);
}
MagickExport Image *SelectiveBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"
CacheView
*blur_view,
*image_view,
*luminance_view;
double
*kernel;
Image
*blur_image,
*luminance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
register ssize_t
i;
size_t
width;
ssize_t
center,
j,
u,
v,
y;
/*
Initialize blur image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
width*sizeof(*kernel)));
if (kernel == (double *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
j=(ssize_t) (width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
}
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
*message;
register const double
*k;
ssize_t
u,
v;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
width);
message=AcquireString("");
k=kernel;
for (v=0; v < (ssize_t) width; v++)
{
*message='\0';
(void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < (ssize_t) width; u++)
{
(void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
luminance_image=CloneImage(image,0,0,MagickTrue,exception);
if (luminance_image == (Image *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
status=TransformImageColorspace(luminance_image,GRAYColorspace);
if (status == MagickFalse)
{
InheritException(exception,&luminance_image->exception);
kernel=(double *) RelinquishAlignedMemory(kernel);
blur_image=DestroyImage(blur_image);
luminance_image=DestroyImage(luminance_image);
return((Image *) NULL);
}
/*
Threshold blur image.
*/
status=MagickTrue;
progress=0;
center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L));
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
luminance_view=AcquireVirtualCacheView(luminance_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
gamma;
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict l,
*restrict p;
register IndexPacket
*restrict blur_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
((width-1)/2L),image->columns+width,width,exception);
l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
(ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(l == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
contrast;
DoublePixelPacket
pixel;
MagickRealType
intensity;
register const double
*restrict k;
register ssize_t
u;
ssize_t
j,
v;
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
k=kernel;
intensity=GetPixelIntensity(image,p+center);
gamma=0.0;
j=0;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.red+=(*k)*GetPixelRed(p+u+j);
pixel.green+=(*k)*GetPixelGreen(p+u+j);
pixel.blue+=(*k)*GetPixelBlue(p+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
if (gamma != 0.0)
{
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
else
{
MagickRealType
alpha;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j));
pixel.red+=(*k)*alpha*GetPixelRed(p+u+j);
pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j);
pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j);
pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
gamma+=(*k)*alpha;
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
if (gamma != 0.0)
{
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
gamma+=(*k);
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
gamma=0.0;
j=0;
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
if (fabs(contrast) < threshold)
{
alpha=(MagickRealType) (QuantumScale*
GetPixelAlpha(p+u+j));
pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j);
gamma+=(*k)*alpha;
}
k++;
}
j+=(ssize_t) (image->columns+width);
}
gamma=PerceptibleReciprocal(gamma);
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
}
}
p++;
l++;
q++;
}
sync=SyncCacheViewAuthenticPixels(blur_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SelectiveBlurImageChannel)
#endif
proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
luminance_view=DestroyCacheView(luminance_view);
image_view=DestroyCacheView(image_view);
luminance_image=DestroyImage(luminance_image);
kernel=(double *) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
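%  A hedged usage sketch (gray shading with the light at 30 degrees
%  azimuth and elevation, for illustration):
%
%    Image *shade=ShadeImage(image,MagickTrue,30.0,30.0,exception);
%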
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag "Shade/Image"
CacheView
*image_view,
*shade_view;
Image
*linear_image,
*shade_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
light;
ssize_t
y;
/*
Initialize shaded image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
linear_image=CloneImage(image,0,0,MagickTrue,exception);
shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
{
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (shade_image != (Image *) NULL)
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
{
InheritException(exception,&shade_image->exception);
linear_image=DestroyImage(linear_image);
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
/*
Compute the light vector.
*/
light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
/*
Shade image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(linear_image,exception);
shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(linear_image,shade_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
MagickRealType
distance,
normal_distance,
shade;
PrimaryInfo
normal;
register const PixelPacket
*restrict p,
*restrict s0,
*restrict s1,
*restrict s2;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
exception);
q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
/*
Shade this row of pixels.
*/
normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
s0=p+1;
s1=s0+image->columns+2;
s2=s1+image->columns+2;
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
/*
Determine the surface normal and compute shading.
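The x gradient is the 3x3 difference of intensities (left column minus
right column) and the y gradient the bottom row minus the top row, in
the style of a Prewitt operator; normal.z stays constant.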
*/
normal.x=(double) (GetPixelIntensity(linear_image,s0-1)+
GetPixelIntensity(linear_image,s1-1)+
GetPixelIntensity(linear_image,s2-1)-
GetPixelIntensity(linear_image,s0+1)-
GetPixelIntensity(linear_image,s1+1)-
GetPixelIntensity(linear_image,s2+1));
normal.y=(double) (GetPixelIntensity(linear_image,s2-1)+
GetPixelIntensity(linear_image,s2)+
GetPixelIntensity(linear_image,s2+1)-
GetPixelIntensity(linear_image,s0-1)-
GetPixelIntensity(linear_image,s0)-
GetPixelIntensity(linear_image,s0+1));
if ((normal.x == 0.0) && (normal.y == 0.0))
shade=light.z;
else
{
shade=0.0;
distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
if (distance > MagickEpsilon)
{
normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z*
normal.z;
if (normal_distance > (MagickEpsilon*MagickEpsilon))
shade=distance/sqrt((double) normal_distance);
}
}
if (gray != MagickFalse)
{
SetPixelRed(q,shade);
SetPixelGreen(q,shade);
SetPixelBlue(q,shade);
}
else
{
SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1)));
SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1)));
SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1)));
}
q->opacity=s1->opacity;
s0++;
s1++;
s2++;
q++;
}
if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ShadeImage)
#endif
proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
shade_view=DestroyCacheView(shade_view);
image_view=DestroyCacheView(image_view);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
shade_image=DestroyImage(shade_image);
return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel, often producing undesirable ringing in
% the filtered result; this can be avoided by using a 2D Gaussian-shaped
% image sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *SharpenImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
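%  A hedged usage sketch (a radius of 0 lets the method pick a suitable
%  radius for the given sigma):
%
%    Image *sharp=SharpenImage(image,0.0,1.0,exception);
%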
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
Image
*sharp_image;
sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception);
return(sharp_image);
}
MagickExport Image *SharpenImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*sharp_image;
KernelInfo
*kernel_info;
register ssize_t
i;
size_t
width;
ssize_t
j,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->signature=MagickSignature;
kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
if (kernel_info->values == (double *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
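/*
Build the sharpening kernel: negated Gaussian taps with a large
positive center tap, then normalize so the kernel sums to 1.
*/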
normalize=0.0;
j=(ssize_t) (kernel_info->width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*
MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel_info->values[i];
i++;
}
}
kernel_info->values[i/2]=(double) ((-2.0)*normalize);
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
sharp_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info,
UndefinedCompositeOp,0.0,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a block defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: Choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
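%  A hedged usage sketch (displace each pixel within roughly a 3-pixel
%  neighborhood):
%
%    Image *spread=SpreadImage(image,3.0,exception);
%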
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"
CacheView
*image_view,
*spread_view;
Image
*spread_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
RandomInfo
**restrict random_info;
size_t
width;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize spread image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (spread_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
{
InheritException(exception,&spread_image->exception);
spread_image=DestroyImage(spread_image);
return((Image *) NULL);
}
/*
Spread image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(spread_image,&bias);
width=GetOptimalKernelWidth1D(radius,0.5);
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,spread_image,spread_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) spread_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(spread_view);
pixel=bias;
for (x=0; x < (ssize_t) spread_image->columns; x++)
{
(void) InterpolateMagickPixelPacket(image,image_view,
UndefinedInterpolatePixel,(double) x+width*(GetPseudoRandomValue(
random_info[id])-0.5),(double) y+width*(GetPseudoRandomValue(
random_info[id])-0.5),&pixel,exception);
SetPixelPacket(spread_image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpreadImage)
#endif
proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
spread_view=DestroyCacheView(spread_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
spread_image=DestroyImage(spread_image);
return(spread_image);
}
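/*
  Usage sketch (illustrative, caller-side code; variable names are assumed):
  displace pixels within a neighborhood of extent 3 and swap the result in
  for the original image:

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *spread = SpreadImage(image, 3.0, exception);
    if (spread != (Image *) NULL)
      {
        image = DestroyImage(image);
        image = spread;
      }
    exception = DestroyExceptionInfo(exception);
*/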
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0, and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
% Image *UnsharpMaskImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double gain,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blurred image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
const double sigma,const double gain,const double threshold,
ExceptionInfo *exception)
{
Image
*sharp_image;
sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain,
threshold,exception);
return(sharp_image);
}
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double gain,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"
CacheView
*image_view,
*unsharp_view;
Image
*unsharp_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
quantum_threshold;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain,
threshold,exception);
if (unsharp_image != (Image *) NULL)
return(unsharp_image);
unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels),
radius,sigma,exception);
if (unsharp_image == (Image *) NULL)
return((Image *) NULL);
quantum_threshold=(MagickRealType) QuantumRange*threshold;
/*
Unsharp-mask image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,unsharp_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
DoublePixelPacket
pixel;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict unsharp_indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
if (fabs(2.0*pixel.red) < quantum_threshold)
pixel.red=(MagickRealType) GetPixelRed(p);
else
pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain);
SetPixelRed(q,ClampToQuantum(pixel.red));
}
if ((channel & GreenChannel) != 0)
{
          pixel.green=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
if (fabs(2.0*pixel.green) < quantum_threshold)
pixel.green=(MagickRealType) GetPixelGreen(p);
else
pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain);
SetPixelGreen(q,ClampToQuantum(pixel.green));
}
if ((channel & BlueChannel) != 0)
{
          pixel.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
if (fabs(2.0*pixel.blue) < quantum_threshold)
pixel.blue=(MagickRealType) GetPixelBlue(p);
else
pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain);
SetPixelBlue(q,ClampToQuantum(pixel.blue));
}
if ((channel & OpacityChannel) != 0)
{
          pixel.opacity=GetPixelOpacity(p)-(MagickRealType) GetPixelOpacity(q);
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetPixelOpacity(p);
          else
            pixel.opacity=(MagickRealType) GetPixelOpacity(p)+(pixel.opacity*gain);
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
GetPixelIndex(unsharp_indexes+x);
if (fabs(2.0*pixel.index) < quantum_threshold)
pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
else
pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
(pixel.index*gain);
SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
}
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_UnsharpMaskImageChannel)
#endif
proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
unsharp_image->type=image->type;
unsharp_view=DestroyCacheView(unsharp_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unsharp_image=DestroyImage(unsharp_image);
return(unsharp_image);
}
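/*
  Usage sketch (illustrative, caller-side code): the classic unsharp-mask
  settings of radius 0 (auto-selected), sigma 1.0, gain 1.0, and a 5%
  threshold; threshold is scaled by QuantumRange internally, so 0.05 means 5%
  of the quantum range:

    Image *sharp = UnsharpMaskImage(image, 0.0, 1.0, 1.0, 0.05, exception);
*/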
|
csr_matop.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix operation functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "csr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddFirstPass:
*
* Performs the first pass needed for Matrix/Matrix addition (C = A + B).
* This function:
* 1) Computes the row pointer of the resulting matrix C_i
* 2) Allocates memory for the matrix C and returns it to the user
*
* Notes: 1) It can be used safely inside OpenMP parallel regions.
* 2) firstrow, lastrow and marker are private variables.
* 3) The remaining arguments are shared variables.
* 4) twspace (thread workspace) must be allocated outside the
* parallel region.
 *        5) The mapping arrays map_A2C and map_B2C are used when adding
 *           off-diagonal matrices. They can be set to NULL when adding
 *           diagonal matrices.
* 6) Assumes that the elements of C_i are initialized to zero.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixAddFirstPass( HYPRE_Int firstrow,
HYPRE_Int lastrow,
HYPRE_Int *twspace,
HYPRE_Int *marker,
HYPRE_Int *map_A2C,
HYPRE_Int *map_B2C,
hypre_CSRMatrix *A,
hypre_CSRMatrix *B,
HYPRE_Int nrows_C,
HYPRE_Int nnzrows_C,
HYPRE_Int ncols_C,
HYPRE_Int *rownnz_C,
HYPRE_MemoryLocation memory_location_C,
HYPRE_Int *C_i,
hypre_CSRMatrix **C_ptr )
{
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Int i, ia, ib, ic, iic, ii, i1;
HYPRE_Int jcol, jj;
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int num_nonzeros;
/* Initialize marker array */
for (i = 0; i < ncols_C; i++)
{
marker[i] = -1;
}
ii = hypre_GetThreadNum();
num_nonzeros = 0;
for (ic = firstrow; ic < lastrow; ic++)
{
iic = rownnz_C ? rownnz_C[ic] : ic;
if (map_A2C)
{
for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++)
{
jcol = map_A2C[A_j[ia]];
marker[jcol] = iic;
num_nonzeros++;
}
}
else
{
for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++)
{
jcol = A_j[ia];
marker[jcol] = iic;
num_nonzeros++;
}
}
if (map_B2C)
{
for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++)
{
jcol = map_B2C[B_j[ib]];
if (marker[jcol] != iic)
{
marker[jcol] = iic;
num_nonzeros++;
}
}
}
else
{
for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] != iic)
{
marker[jcol] = iic;
num_nonzeros++;
}
}
}
C_i[iic + 1] = num_nonzeros;
}
twspace[ii] = num_nonzeros;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Correct C_i - phase 1 */
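   /* (Each thread shifts its rows of C_i by the combined nonzero count of all
      lower-numbered threads, turning thread-local offsets into global ones.) */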
if (ii)
{
jj = twspace[0];
for (i1 = 1; i1 < ii; i1++)
{
jj += twspace[i1];
}
for (ic = firstrow; ic < lastrow; ic++)
{
iic = rownnz_C ? rownnz_C[ic] : ic;
C_i[iic + 1] += jj;
}
}
else
{
num_nonzeros = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
num_nonzeros += twspace[i1];
}
*C_ptr = hypre_CSRMatrixCreate(nrows_C, ncols_C, num_nonzeros);
hypre_CSRMatrixI(*C_ptr) = C_i;
hypre_CSRMatrixRownnz(*C_ptr) = rownnz_C;
hypre_CSRMatrixNumRownnz(*C_ptr) = nnzrows_C;
hypre_CSRMatrixInitialize_v2(*C_ptr, 0, memory_location_C);
}
/* Correct C_i - phase 2 */
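   /* (Rows absent from rownnz_C were never written above; propagate the row
      pointer of the nearest preceding nonzero row into them so that C_i stays
      monotone.) */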
if (rownnz_C != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (ic = firstrow; ic < (lastrow - 1); ic++)
{
for (iic = rownnz_C[ic] + 1; iic < rownnz_C[ic + 1]; iic++)
{
hypre_assert(C_i[iic + 1] == 0);
C_i[iic + 1] = C_i[rownnz_C[ic] + 1];
}
}
if (ii < (num_threads - 1))
{
for (iic = rownnz_C[lastrow - 1] + 1; iic < rownnz_C[lastrow]; iic++)
{
hypre_assert(C_i[iic + 1] == 0);
C_i[iic + 1] = C_i[rownnz_C[lastrow - 1] + 1];
}
}
else
{
for (iic = rownnz_C[lastrow - 1] + 1; iic < nrows_C; iic++)
{
hypre_assert(C_i[iic + 1] == 0);
C_i[iic + 1] = C_i[rownnz_C[lastrow - 1] + 1];
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
#ifdef HYPRE_DEBUG
if (!ii)
{
for (i = 0; i < nrows_C; i++)
{
hypre_assert(C_i[i] <= C_i[i + 1]);
hypre_assert(((A_i[i + 1] - A_i[i]) +
(B_i[i + 1] - B_i[i])) >=
(C_i[i + 1] - C_i[i]));
hypre_assert((C_i[i + 1] - C_i[i]) >= (A_i[i + 1] - A_i[i]));
hypre_assert((C_i[i + 1] - C_i[i]) >= (B_i[i + 1] - B_i[i]));
}
hypre_assert((C_i[nrows_C] - C_i[0]) == num_nonzeros);
}
#endif
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddSecondPass:
*
* Performs the second pass needed for Matrix/Matrix addition (C = A + B).
* This function computes C_j and C_data.
*
* Notes: see notes for hypre_CSRMatrixAddFirstPass
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixAddSecondPass( HYPRE_Int firstrow,
HYPRE_Int lastrow,
HYPRE_Int *twspace,
HYPRE_Int *marker,
HYPRE_Int *map_A2C,
HYPRE_Int *map_B2C,
HYPRE_Int *rownnz_C,
HYPRE_Complex alpha,
HYPRE_Complex beta,
hypre_CSRMatrix *A,
hypre_CSRMatrix *B,
hypre_CSRMatrix *C )
{
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int nnzs_A = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
HYPRE_Int nnzs_B = hypre_CSRMatrixNumNonzeros(B);
HYPRE_Int *C_i = hypre_CSRMatrixI(C);
HYPRE_Int *C_j = hypre_CSRMatrixJ(C);
HYPRE_Complex *C_data = hypre_CSRMatrixData(C);
HYPRE_Int ncols_C = hypre_CSRMatrixNumCols(C);
HYPRE_Int ia, ib, ic, iic;
HYPRE_Int jcol, pos;
hypre_assert(( map_A2C && map_B2C) ||
(!map_A2C && !map_B2C) ||
( map_A2C && (nnzs_B == 0)) ||
( map_B2C && (nnzs_A == 0)));
/* Initialize marker vector */
for (ia = 0; ia < ncols_C; ia++)
{
marker[ia] = -1;
}
pos = C_i[rownnz_C ? rownnz_C[firstrow] : firstrow];
if ((map_A2C && map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0)))
{
for (ic = firstrow; ic < lastrow; ic++)
{
iic = rownnz_C ? rownnz_C[ic] : ic;
for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++)
{
jcol = map_A2C[A_j[ia]];
C_j[pos] = jcol;
C_data[pos] = alpha * A_data[ia];
marker[jcol] = pos;
pos++;
}
for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++)
{
jcol = map_B2C[B_j[ib]];
if (marker[jcol] < C_i[iic])
{
C_j[pos] = jcol;
C_data[pos] = beta * B_data[ib];
marker[jcol] = pos;
pos++;
}
else
{
hypre_assert(C_j[marker[jcol]] == jcol);
C_data[marker[jcol]] += beta * B_data[ib];
}
}
hypre_assert(pos == C_i[iic + 1]);
} /* end for loop */
}
else
{
for (ic = firstrow; ic < lastrow; ic++)
{
iic = rownnz_C ? rownnz_C[ic] : ic;
for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++)
{
jcol = A_j[ia];
C_j[pos] = jcol;
C_data[pos] = alpha * A_data[ia];
marker[jcol] = pos;
pos++;
}
for (ib = B_i[iic]; ib < B_i[iic + 1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] < C_i[iic])
{
C_j[pos] = jcol;
C_data[pos] = beta * B_data[ib];
marker[jcol] = pos;
pos++;
}
else
{
hypre_assert(C_j[marker[jcol]] == jcol);
C_data[marker[jcol]] += beta * B_data[ib];
}
}
hypre_assert(pos == C_i[iic + 1]);
} /* end for loop */
}
hypre_assert(pos == C_i[rownnz_C ? rownnz_C[lastrow - 1] + 1 : lastrow]);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAdd:
*
* Adds two CSR Matrices A and B and returns a CSR Matrix C = alpha*A + beta*B;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_CSRMatrixAddHost ( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
HYPRE_Complex beta,
hypre_CSRMatrix *B )
{
/* CSRMatrix A */
HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
/* CSRMatrix B */
HYPRE_Int *rownnz_B = hypre_CSRMatrixRownnz(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int nnzrows_B = hypre_CSRMatrixNumRownnz(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
/* CSRMatrix C */
hypre_CSRMatrix *C;
HYPRE_Int *C_i;
HYPRE_Int *rownnz_C;
HYPRE_Int nnzrows_C;
HYPRE_Int *twspace;
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (nrows_A != nrows_B || ncols_A != ncols_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n");
return NULL;
}
/* Allocate memory */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);
/* Set nonzero rows data of diag_C */
nnzrows_C = nrows_A;
if ((nnzrows_A < nrows_A) && (nnzrows_B < nrows_B))
{
hypre_MergeOrderedArrays(nnzrows_A, rownnz_A,
nnzrows_B, rownnz_B,
&nnzrows_C, &rownnz_C);
}
else
{
rownnz_C = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ns, ne;
HYPRE_Int *marker = NULL;
hypre_partition1D(nnzrows_C, hypre_NumActiveThreads(), hypre_GetThreadNum(), &ns, &ne);
marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker, NULL, NULL,
A, B, nrows_A, nnzrows_C, ncols_A, rownnz_C,
memory_location_C, C_i, &C);
hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker, NULL, NULL,
rownnz_C, alpha, beta, A, B, C);
hypre_TFree(marker, HYPRE_MEMORY_HOST);
} /* end of parallel region */
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
return C;
}
hypre_CSRMatrix*
hypre_CSRMatrixAdd( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
HYPRE_Complex beta,
hypre_CSRMatrix *B)
{
hypre_CSRMatrix *C = NULL;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
hypre_CSRMatrixMemoryLocation(B) );
if (exec == HYPRE_EXEC_DEVICE)
{
C = hypre_CSRMatrixAddDevice(alpha, A, beta, B);
}
else
#endif
{
C = hypre_CSRMatrixAddHost(alpha, A, beta, B);
}
return C;
}
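/*
  Usage sketch (caller-side, illustrative): form C = 2*A - B and free the
  result with the matching destructor once done:

    hypre_CSRMatrix *C = hypre_CSRMatrixAdd(2.0, A, -1.0, B);
    ...
    hypre_CSRMatrixDestroy(C);
*/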
#if 0
/*--------------------------------------------------------------------------
* hypre_CSRMatrixBigAdd:
*
* RL: comment it out which was used in ams.c. Should be combined with
* above hypre_CSRMatrixAddHost whenever it is needed again
*
* Adds two CSR Matrices A and B with column indices stored as HYPRE_BigInt
* and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A,
hypre_CSRMatrix *B )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_BigInt *A_j = hypre_CSRMatrixBigJ(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_BigInt *B_j = hypre_CSRMatrixBigJ(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
hypre_CSRMatrix *C;
HYPRE_Complex *C_data;
HYPRE_Int *C_i;
HYPRE_BigInt *C_j;
HYPRE_Int *twspace;
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (nrows_A != nrows_B || ncols_A != ncols_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n");
return NULL;
}
/* Allocate memory */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ia, ib, ic, num_nonzeros;
HYPRE_Int ns, ne, pos;
HYPRE_BigInt jcol;
HYPRE_Int ii, num_threads;
HYPRE_Int jj;
HYPRE_Int *marker = NULL;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
hypre_partition1D(nrows_A, num_threads, ii, &ns, &ne);
marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
for (ia = 0; ia < ncols_A; ia++)
{
marker[ia] = -1;
}
/* First pass */
num_nonzeros = 0;
for (ic = ns; ic < ne; ic++)
{
C_i[ic] = num_nonzeros;
for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++)
{
jcol = A_j[ia];
marker[jcol] = ic;
num_nonzeros++;
}
for (ib = B_i[ic]; ib < B_i[ic + 1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] != ic)
{
marker[jcol] = ic;
num_nonzeros++;
}
}
C_i[ic + 1] = num_nonzeros;
}
twspace[ii] = num_nonzeros;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Correct row pointer */
if (ii)
{
jj = twspace[0];
for (ic = 1; ic < ii; ic++)
{
            jj += twspace[ic];
}
for (ic = ns; ic < ne; ic++)
{
C_i[ic] += jj;
}
}
else
{
C_i[nrows_A] = 0;
for (ic = 0; ic < num_threads; ic++)
{
C_i[nrows_A] += twspace[ic];
}
C = hypre_CSRMatrixCreate(nrows_A, ncols_A, C_i[nrows_A]);
hypre_CSRMatrixI(C) = C_i;
hypre_CSRMatrixInitialize_v2(C, 1, memory_location_C);
C_j = hypre_CSRMatrixBigJ(C);
C_data = hypre_CSRMatrixData(C);
      }
      /* Ensure C_j and C_data have been allocated by thread 0 before any
         thread starts the second pass */
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
/* Second pass */
for (ia = 0; ia < ncols_A; ia++)
{
marker[ia] = -1;
}
pos = C_i[ns];
for (ic = ns; ic < ne; ic++)
{
for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++)
{
jcol = A_j[ia];
C_j[pos] = jcol;
C_data[pos] = A_data[ia];
marker[jcol] = pos;
pos++;
}
for (ib = B_i[ic]; ib < B_i[ic + 1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] < C_i[ic])
{
C_j[pos] = jcol;
C_data[pos] = B_data[ib];
marker[jcol] = pos;
pos++;
}
else
{
C_data[marker[jcol]] += B_data[ib];
}
}
}
hypre_TFree(marker, HYPRE_MEMORY_HOST);
} /* end of parallel region */
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
return C;
}
#endif
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMultiplyHost
*
* Multiplies two CSR Matrices A and B and returns a CSR Matrix C;
*
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A,
hypre_CSRMatrix *B )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
HYPRE_Int num_nnz_A = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
HYPRE_Int num_nnz_B = hypre_CSRMatrixNumNonzeros(B);
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
hypre_CSRMatrix *C;
HYPRE_Complex *C_data;
HYPRE_Int *C_i;
HYPRE_Int *C_j;
HYPRE_Int ia, ib, ic, ja, jb, num_nonzeros;
HYPRE_Int counter;
HYPRE_Complex a_entry, b_entry;
HYPRE_Int allsquare = 0;
HYPRE_Int *twspace;
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (ncols_A != nrows_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n");
return NULL;
}
if (nrows_A == ncols_B)
{
allsquare = 1;
}
if ((num_nnz_A == 0) || (num_nnz_B == 0))
{
C = hypre_CSRMatrixCreate(nrows_A, ncols_B, 0);
hypre_CSRMatrixNumRownnz(C) = 0;
hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
return C;
}
/* Allocate memory */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, counter, a_entry, b_entry)
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int ns, ne, ii, jj;
HYPRE_Int num_threads;
HYPRE_Int i1, iic;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);
B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST);
for (ib = 0; ib < ncols_B; ib++)
{
B_marker[ib] = -1;
}
HYPRE_ANNOTATE_REGION_BEGIN("%s", "First pass");
/* First pass: compute sizes of C rows. */
num_nonzeros = 0;
for (ic = ns; ic < ne; ic++)
{
if (rownnz_A)
{
iic = rownnz_A[ic];
C_i[iic] = num_nonzeros;
}
else
{
iic = ic;
C_i[iic] = num_nonzeros;
if (allsquare)
{
B_marker[iic] = iic;
num_nonzeros++;
}
}
for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++)
{
ja = A_j[ia];
for (ib = B_i[ja]; ib < B_i[ja + 1]; ib++)
{
jb = B_j[ib];
if (B_marker[jb] != iic)
{
B_marker[jb] = iic;
num_nonzeros++;
}
}
}
}
twspace[ii] = num_nonzeros;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Correct C_i - phase 1 */
if (ii)
{
jj = twspace[0];
for (i1 = 1; i1 < ii; i1++)
{
jj += twspace[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
iic = rownnz_A ? rownnz_A[i1] : i1;
C_i[iic] += jj;
}
}
else
{
C_i[nrows_A] = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
C_i[nrows_A] += twspace[i1];
}
C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]);
hypre_CSRMatrixI(C) = C_i;
hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
C_j = hypre_CSRMatrixJ(C);
C_data = hypre_CSRMatrixData(C);
}
/* Correct C_i - phase 2 */
if (rownnz_A != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (ic = ns; ic < (ne - 1); ic++)
{
for (iic = rownnz_A[ic] + 1; iic < rownnz_A[ic + 1]; iic++)
{
C_i[iic] = C_i[rownnz_A[ic + 1]];
}
}
if (ii < (num_threads - 1))
{
for (iic = rownnz_A[ne - 1] + 1; iic < rownnz_A[ne]; iic++)
{
C_i[iic] = C_i[rownnz_A[ne]];
}
}
else
{
for (iic = rownnz_A[ne - 1] + 1; iic < nrows_A; iic++)
{
C_i[iic] = C_i[nrows_A];
}
}
}
/* End of First Pass */
HYPRE_ANNOTATE_REGION_END("%s", "First pass");
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Second pass: Fill in C_data and C_j. */
HYPRE_ANNOTATE_REGION_BEGIN("%s", "Second pass");
for (ib = 0; ib < ncols_B; ib++)
{
B_marker[ib] = -1;
}
counter = rownnz_A ? C_i[rownnz_A[ns]] : C_i[ns];
for (ic = ns; ic < ne; ic++)
{
if (rownnz_A)
{
iic = rownnz_A[ic];
}
else
{
iic = ic;
if (allsquare)
{
B_marker[ic] = counter;
C_data[counter] = 0;
C_j[counter] = ic;
counter++;
}
}
for (ia = A_i[iic]; ia < A_i[iic + 1]; ia++)
{
ja = A_j[ia];
a_entry = A_data[ia];
for (ib = B_i[ja]; ib < B_i[ja + 1]; ib++)
{
jb = B_j[ib];
b_entry = B_data[ib];
if (B_marker[jb] < C_i[iic])
{
B_marker[jb] = counter;
C_j[B_marker[jb]] = jb;
C_data[B_marker[jb]] = a_entry * b_entry;
counter++;
}
else
{
C_data[B_marker[jb]] += a_entry * b_entry;
}
}
}
}
HYPRE_ANNOTATE_REGION_END("%s", "Second pass");
/* End of Second Pass */
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
} /*end parallel region */
#ifdef HYPRE_DEBUG
for (ic = 0; ic < nrows_A; ic++)
{
hypre_assert(C_i[ic] <= C_i[ic + 1]);
}
#endif
// Set rownnz and num_rownnz
hypre_CSRMatrixSetRownnz(C);
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
return C;
}
hypre_CSRMatrix*
hypre_CSRMatrixMultiply( hypre_CSRMatrix *A,
hypre_CSRMatrix *B)
{
hypre_CSRMatrix *C = NULL;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
hypre_CSRMatrixMemoryLocation(B) );
if (exec == HYPRE_EXEC_DEVICE)
{
C = hypre_CSRMatrixMultiplyDevice(A, B);
}
else
#endif
{
C = hypre_CSRMatrixMultiplyHost(A, B);
}
return C;
}
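/*
  Usage sketch (illustrative): C = A * B, where ncols(A) must equal nrows(B);
  the wrapper dispatches to the device or host path based on where the
  operands live:

    hypre_CSRMatrix *C = hypre_CSRMatrixMultiply(A, B);
*/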
/*--------------------------------------------------------------------------
* hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A,
HYPRE_Real tol )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
hypre_CSRMatrix *B;
HYPRE_Complex *B_data;
HYPRE_Int *B_i;
HYPRE_Int *B_j;
HYPRE_Int zeros;
HYPRE_Int i, j;
HYPRE_Int pos_A, pos_B;
zeros = 0;
for (i = 0; i < num_nonzeros; i++)
{
if (hypre_cabs(A_data[i]) <= tol)
{
zeros++;
}
}
if (zeros)
{
B = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros - zeros);
hypre_CSRMatrixInitialize(B);
B_i = hypre_CSRMatrixI(B);
B_j = hypre_CSRMatrixJ(B);
B_data = hypre_CSRMatrixData(B);
B_i[0] = 0;
pos_A = pos_B = 0;
for (i = 0; i < nrows_A; i++)
{
for (j = A_i[i]; j < A_i[i + 1]; j++)
{
if (hypre_cabs(A_data[j]) <= tol)
{
pos_A++;
}
else
{
B_data[pos_B] = A_data[pos_A];
B_j[pos_B] = A_j[pos_A];
pos_B++;
pos_A++;
}
}
B_i[i + 1] = pos_B;
}
return B;
}
else
{
return NULL;
}
}
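/*
  Usage sketch (caller-side, illustrative): drop entries with magnitude <= tol
  and keep the original matrix when nothing was removed (the routine returns
  NULL in that case):

    hypre_CSRMatrix *B = hypre_CSRMatrixDeleteZeros(A, 1.0e-15);
    if (B)
    {
       hypre_CSRMatrixDestroy(A);
       A = B;
    }
*/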
/******************************************************************************
*
* Finds transpose of a hypre_CSRMatrix
*
*****************************************************************************/
/**
* idx = idx2*dim1 + idx1
* -> ret = idx1*dim2 + idx2
* = (idx%dim1)*dim2 + idx/dim1
*/
static inline HYPRE_Int
transpose_idx (HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2)
{
return idx % dim1 * dim2 + idx / dim1;
}
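/*
  Worked example: with dim1 = 2 and dim2 = 3, transpose_idx maps the linear
  indices 0,1,2,3,4,5 to 0,3,1,4,2,5; that is, it converts an index into a
  row-major HYPRE_Int[dim2][dim1] array into the index of the same entry in
  the transposed HYPRE_Int[dim1][dim2] layout.
*/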
/*--------------------------------------------------------------------------
* hypre_CSRMatrixTransposeHost
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTransposeHost(hypre_CSRMatrix *A,
hypre_CSRMatrix **AT,
HYPRE_Int data)
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
HYPRE_Int num_rows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Int num_nnzs_A = hypre_CSRMatrixNumNonzeros(A);
HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);
HYPRE_Complex *AT_data;
HYPRE_Int *AT_j;
HYPRE_Int num_rows_AT;
HYPRE_Int num_cols_AT;
HYPRE_Int num_nnzs_AT;
HYPRE_Int max_col;
HYPRE_Int i, j;
/*--------------------------------------------------------------
* First, ascertain that num_cols and num_nonzeros has been set.
* If not, set them.
*--------------------------------------------------------------*/
HYPRE_ANNOTATE_FUNC_BEGIN;
if (!num_nnzs_A)
{
num_nnzs_A = A_i[num_rows_A];
}
if (num_rows_A && num_nnzs_A && ! num_cols_A)
{
max_col = -1;
for (i = 0; i < num_rows_A; ++i)
{
for (j = A_i[i]; j < A_i[i + 1]; j++)
{
if (A_j[j] > max_col)
{
max_col = A_j[j];
}
}
}
num_cols_A = max_col + 1;
}
num_rows_AT = num_cols_A;
num_cols_AT = num_rows_A;
num_nnzs_AT = num_nnzs_A;
*AT = hypre_CSRMatrixCreate(num_rows_AT, num_cols_AT, num_nnzs_AT);
hypre_CSRMatrixMemoryLocation(*AT) = memory_location;
if (num_cols_A == 0)
{
// JSP: parallel counting sorting breaks down
// when A has no columns
hypre_CSRMatrixInitialize(*AT);
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
AT_j = hypre_CTAlloc(HYPRE_Int, num_nnzs_AT, memory_location);
hypre_CSRMatrixJ(*AT) = AT_j;
if (data)
{
AT_data = hypre_CTAlloc(HYPRE_Complex, num_nnzs_AT, memory_location);
hypre_CSRMatrixData(*AT) = AT_data;
}
/*-----------------------------------------------------------------
* Parallel count sort
*-----------------------------------------------------------------*/
HYPRE_Int *bucket = hypre_CTAlloc(HYPRE_Int, (num_cols_A + 1) * hypre_NumThreads(),
HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ii, num_threads, ns, ne;
HYPRE_Int i, j, j0, j1, ir;
HYPRE_Int idx, offset;
HYPRE_Int transpose_i;
HYPRE_Int transpose_i_minus_1;
HYPRE_Int transpose_i0;
HYPRE_Int transpose_j0;
HYPRE_Int transpose_j1;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);
/*-----------------------------------------------------------------
* Count the number of entries that will go into each bucket
* bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
*-----------------------------------------------------------------*/
if (rownnz_A == NULL)
{
for (j = A_i[ns]; j < A_i[ne]; ++j)
{
bucket[ii * num_cols_A + A_j[j]]++;
}
}
else
{
for (i = ns; i < ne; i++)
{
ir = rownnz_A[i];
for (j = A_i[ir]; j < A_i[ir + 1]; ++j)
{
bucket[ii * num_cols_A + A_j[j]]++;
}
}
}
/*-----------------------------------------------------------------
* Parallel prefix sum of bucket with length num_colsA * num_threads
* accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = ii * num_cols_A + 1; i < (ii + 1)*num_cols_A; ++i)
{
transpose_i = transpose_idx(i, num_threads, num_cols_A);
transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_cols_A);
bucket[transpose_i] += bucket[transpose_i_minus_1];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
{
for (i = 1; i < num_threads; ++i)
{
j0 = num_cols_A * i - 1;
j1 = num_cols_A * (i + 1) - 1;
transpose_j0 = transpose_idx(j0, num_threads, num_cols_A);
transpose_j1 = transpose_idx(j1, num_threads, num_cols_A);
bucket[transpose_j1] += bucket[transpose_j0];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii > 0)
{
transpose_i0 = transpose_idx(num_cols_A * ii - 1, num_threads, num_cols_A);
offset = bucket[transpose_i0];
for (i = ii * num_cols_A; i < (ii + 1)*num_cols_A - 1; ++i)
{
transpose_i = transpose_idx(i, num_threads, num_cols_A);
bucket[transpose_i] += offset;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*----------------------------------------------------------------
* Load the data and column numbers of AT
*----------------------------------------------------------------*/
if (data)
{
for (i = ne - 1; i >= ns; --i)
{
ir = rownnz_A ? rownnz_A[i] : i;
for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
{
idx = A_j[j];
--bucket[ii * num_cols_A + idx];
offset = bucket[ii * num_cols_A + idx];
AT_data[offset] = A_data[j];
AT_j[offset] = ir;
}
}
}
else
{
for (i = ne - 1; i >= ns; --i)
{
ir = rownnz_A ? rownnz_A[i] : i;
for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
{
idx = A_j[j];
--bucket[ii * num_cols_A + idx];
offset = bucket[ii * num_cols_A + idx];
AT_j[offset] = ir;
}
}
}
} /* end parallel region */
hypre_CSRMatrixI(*AT) = hypre_TAlloc(HYPRE_Int, num_cols_A + 1, memory_location);
hypre_TMemcpy(hypre_CSRMatrixI(*AT), bucket, HYPRE_Int, num_cols_A + 1, memory_location,
HYPRE_MEMORY_HOST);
hypre_CSRMatrixI(*AT)[num_cols_A] = num_nnzs_A;
hypre_TFree(bucket, HYPRE_MEMORY_HOST);
// Set rownnz and num_rownnz
if (hypre_CSRMatrixNumRownnz(A) < num_rows_A)
{
hypre_CSRMatrixSetRownnz(*AT);
}
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixTranspose
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix *A,
hypre_CSRMatrix **AT,
HYPRE_Int data)
{
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_CSRMatrixTransposeDevice(A, AT, data);
}
else
#endif
{
ierr = hypre_CSRMatrixTransposeHost(A, AT, data);
}
return ierr;
}
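/*
  Usage sketch (illustrative): build AT = A^T including numerical values
  (data = 1); pass data = 0 to transpose the sparsity pattern only:

    hypre_CSRMatrix *AT = NULL;
    hypre_CSRMatrixTranspose(A, &AT, 1);
*/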
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSplit
*--------------------------------------------------------------------------*/
/* RL: TODO add memory locations */
HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix *Bs_ext,
HYPRE_BigInt first_col_diag_B,
HYPRE_BigInt last_col_diag_B,
HYPRE_Int num_cols_offd_B,
HYPRE_BigInt *col_map_offd_B,
HYPRE_Int *num_cols_offd_C_ptr,
HYPRE_BigInt **col_map_offd_C_ptr,
hypre_CSRMatrix **Bext_diag_ptr,
hypre_CSRMatrix **Bext_offd_ptr)
{
HYPRE_Complex *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
HYPRE_Int *Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
HYPRE_BigInt *Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
HYPRE_Int num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
HYPRE_Int B_ext_diag_size = 0;
HYPRE_Int B_ext_offd_size = 0;
HYPRE_Int *B_ext_diag_i = NULL;
HYPRE_Int *B_ext_diag_j = NULL;
HYPRE_Complex *B_ext_diag_data = NULL;
HYPRE_Int *B_ext_offd_i = NULL;
HYPRE_Int *B_ext_offd_j = NULL;
HYPRE_Complex *B_ext_offd_data = NULL;
HYPRE_Int *my_diag_array;
HYPRE_Int *my_offd_array;
HYPRE_BigInt *temp;
HYPRE_Int max_num_threads;
HYPRE_Int cnt = 0;
hypre_CSRMatrix *Bext_diag = NULL;
hypre_CSRMatrix *Bext_offd = NULL;
HYPRE_BigInt *col_map_offd_C = NULL;
HYPRE_Int num_cols_offd_C = 0;
B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext + 1, HYPRE_MEMORY_HOST);
B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext + 1, HYPRE_MEMORY_HOST);
max_num_threads = hypre_NumThreads();
my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ns, ne, ii, num_threads;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
hypre_partition1D(num_rows_Bext, num_threads, ii, &ns, &ne);
my_diag_size = 0;
my_offd_size = 0;
for (i = ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
{
my_offd_size++;
}
else
{
my_diag_size++;
}
}
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size || num_cols_offd_B)
{
temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
{
temp[cnt_offd] = Bs_ext_j[j];
B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
}
/* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
cnt = 0;
if (B_ext_offd_size || num_cols_offd_B)
{
cnt = B_ext_offd_size;
for (i = 0; i < num_cols_offd_B; i++)
{
temp[cnt++] = col_map_offd_B[i];
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt - 1);
num_cols_offd_C = 1;
HYPRE_BigInt value = temp[0];
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
{
col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_C; i++)
{
col_map_offd_C[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = ns; i < ne; i++)
{
for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++)
{
B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C);
}
}
} /* end parallel region */
hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);
Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B - first_col_diag_B + 1,
B_ext_diag_size);
hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(Bext_diag) = B_ext_diag_i;
hypre_CSRMatrixJ(Bext_diag) = B_ext_diag_j;
hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
hypre_CSRMatrixI(Bext_offd) = B_ext_offd_i;
hypre_CSRMatrixJ(Bext_offd) = B_ext_offd_j;
hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;
*col_map_offd_C_ptr = col_map_offd_C;
*Bext_diag_ptr = Bext_diag;
*Bext_offd_ptr = Bext_offd;
*num_cols_offd_C_ptr = num_cols_offd_C;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixReorderHost
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorderHost(hypre_CSRMatrix *A)
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A);
HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A);
HYPRE_Int num_rows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Int i, ii, j;
/* the matrix should be square */
if (num_rows_A != num_cols_A)
{
return -1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < nnzrows_A; i++)
{
ii = rownnz_A ? rownnz_A[i] : i;
for (j = A_i[ii]; j < A_i[ii + 1]; j++)
{
if (A_j[j] == ii)
{
if (j != A_i[ii])
{
hypre_swap(A_j, A_i[ii], j);
hypre_swap_c(A_data, A_i[ii], j);
}
break;
}
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixReorder:
*
* Reorders the column and data arrays of a square CSR matrix, such that the
* first entry in each row is the diagonal one.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_CSRMatrixMoveDiagFirstDevice(A);
}
else
#endif
{
ierr = hypre_CSRMatrixReorderHost(A);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixAddPartial:
* adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
* defines to which row of A the i-th row of B is added, and returns a CSR Matrix C;
* Note: The routine does not check for 0-elements which might be generated
* through cancellation of elements in A and B or already contained
* in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
hypre_CSRMatrix *B,
HYPRE_Int *row_nums)
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A);
HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *B_data = hypre_CSRMatrixData(B);
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B);
HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B);
hypre_CSRMatrix *C;
HYPRE_Complex *C_data;
HYPRE_Int *C_i;
HYPRE_Int *C_j;
HYPRE_Int ia, ib, ic, jcol, num_nonzeros;
HYPRE_Int pos, i, i2, j, cnt;
HYPRE_Int *marker;
HYPRE_Int *map;
HYPRE_Int *temp;
HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
if (ncols_A != ncols_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! incompatible matrix dimensions!\n");
return NULL;
}
map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_B; i++)
{
map[i] = i;
temp[i] = row_nums[i];
}
hypre_qsort2i(temp, map, 0, nrows_B - 1);
marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C);
for (ia = 0; ia < ncols_A; ia++)
{
marker[ia] = -1;
}
num_nonzeros = 0;
C_i[0] = 0;
cnt = 0;
for (ic = 0; ic < nrows_A; ic++)
{
for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++)
{
jcol = A_j[ia];
marker[jcol] = ic;
num_nonzeros++;
}
if (cnt < nrows_B && temp[cnt] == ic)
{
for (j = cnt; j < nrows_B; j++)
{
if (temp[j] == ic)
{
i2 = map[cnt++];
for (ib = B_i[i2]; ib < B_i[i2 + 1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] != ic)
{
marker[jcol] = ic;
num_nonzeros++;
}
}
}
else
{
break;
}
}
}
C_i[ic + 1] = num_nonzeros;
}
C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
hypre_CSRMatrixI(C) = C_i;
hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
C_j = hypre_CSRMatrixJ(C);
C_data = hypre_CSRMatrixData(C);
for (ia = 0; ia < ncols_A; ia++)
{
marker[ia] = -1;
}
cnt = 0;
pos = 0;
for (ic = 0; ic < nrows_A; ic++)
{
for (ia = A_i[ic]; ia < A_i[ic + 1]; ia++)
{
jcol = A_j[ia];
C_j[pos] = jcol;
C_data[pos] = A_data[ia];
marker[jcol] = pos;
pos++;
}
if (cnt < nrows_B && temp[cnt] == ic)
{
for (j = cnt; j < nrows_B; j++)
{
if (temp[j] == ic)
{
i2 = map[cnt++];
for (ib = B_i[i2]; ib < B_i[i2 + 1]; ib++)
{
jcol = B_j[ib];
if (marker[jcol] < C_i[ic])
{
C_j[pos] = jcol;
C_data[pos] = B_data[ib];
marker[jcol] = pos;
pos++;
}
else
{
C_data[marker[jcol]] += B_data[ib];
}
}
}
else
{
break;
}
}
}
}
hypre_TFree(marker, HYPRE_MEMORY_HOST);
hypre_TFree(map, HYPRE_MEMORY_HOST);
hypre_TFree(temp, HYPRE_MEMORY_HOST);
return C;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSumElts:
* Returns the sum of all matrix elements.
*--------------------------------------------------------------------------*/
HYPRE_Complex
hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
HYPRE_Complex sum = 0;
HYPRE_Complex *data = hypre_CSRMatrixData(A);
HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_nonzeros; i++)
{
sum += data[i];
}
return sum;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixFnorm
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_CSRMatrixFnorm( hypre_CSRMatrix *A )
{
HYPRE_Int nrows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int i;
HYPRE_Complex sum = 0;
hypre_assert(num_nonzeros == A_i[nrows]);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_nonzeros; ++i)
{
HYPRE_Complex v = A_data[i];
sum += v * v;
}
return sqrt(sum);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixComputeRowSumHost
*
* type == 0, sum,
* 1, abs sum
* 2, square sum
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSumHost( hypre_CSRMatrix *A,
HYPRE_Int *CF_i,
HYPRE_Int *CF_j,
HYPRE_Complex *row_sum,
HYPRE_Int type,
HYPRE_Complex scal,
const char *set_or_add)
{
HYPRE_Int nrows = hypre_CSRMatrixNumRows(A);
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int i, j;
for (i = 0; i < nrows; i++)
{
HYPRE_Complex row_sum_i = set_or_add[0] == 's' ? 0.0 : row_sum[i];
for (j = A_i[i]; j < A_i[i + 1]; j++)
{
if (CF_i && CF_j && CF_i[i] != CF_j[A_j[j]])
{
continue;
}
if (type == 0)
{
row_sum_i += scal * A_data[j];
}
else if (type == 1)
{
row_sum_i += scal * fabs(A_data[j]);
}
else if (type == 2)
{
row_sum_i += scal * A_data[j] * A_data[j];
}
}
row_sum[i] = row_sum_i;
}
}
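/*
  Worked example for a single row with stored entries {2, -3}, scal = 1, and
  set_or_add = "set":
     type 0 (sum):        2 + (-3)        = -1
     type 1 (abs sum):    |2| + |-3|      =  5
     type 2 (square sum): 2*2 + (-3)*(-3) = 13
*/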
/*--------------------------------------------------------------------------
* hypre_CSRMatrixComputeRowSum
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSum( hypre_CSRMatrix *A,
HYPRE_Int *CF_i,
HYPRE_Int *CF_j,
HYPRE_Complex *row_sum,
HYPRE_Int type,
HYPRE_Complex scal,
const char *set_or_add)
{
hypre_assert( (CF_i && CF_j) || (!CF_i && !CF_j) );
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_CSRMatrixComputeRowSumDevice(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
}
else
#endif
{
hypre_CSRMatrixComputeRowSumHost(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
}
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixExtractDiagonalHost
* type 0: diag
* 1: abs diag
* 2: diag inverse
* 3: diag inverse sqrt
* 4: abs diag inverse sqrt
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonalHost( hypre_CSRMatrix *A,
HYPRE_Complex *d,
HYPRE_Int type)
{
HYPRE_Int nrows = hypre_CSRMatrixNumRows(A);
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int i, j;
HYPRE_Complex d_i;
for (i = 0; i < nrows; i++)
{
d_i = 0.0;
for (j = A_i[i]; j < A_i[i + 1]; j++)
{
if (A_j[j] == i)
{
if (type == 0)
{
d_i = A_data[j];
}
else if (type == 1)
{
d_i = fabs(A_data[j]);
}
else if (type == 2)
{
d_i = 1.0 / (A_data[j]);
}
else if (type == 3)
{
d_i = 1.0 / (sqrt(A_data[j]));
}
else if (type == 4)
{
d_i = 1.0 / (sqrt(fabs(A_data[j])));
}
break;
}
}
d[i] = d_i;
}
}
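/*
  Worked example (illustrative): for a stored diagonal entry a_ii = 4.0,
     type 0 -> 4.0, type 1 -> 4.0, type 2 -> 0.25, type 3 -> 0.5, type 4 -> 0.5.
  Rows with no stored diagonal entry yield d[i] = 0.
*/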
/*--------------------------------------------------------------------------
* hypre_CSRMatrixExtractDiagonal
*
 * type 0: diag
 *      1: abs diag
 *      2: diag inverse
 *      3: diag inverse sqrt
 *      4: abs diag inverse sqrt
*--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonal( hypre_CSRMatrix *A,
HYPRE_Complex *d,
HYPRE_Int type)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_CSRMatrixExtractDiagonalDevice(A, d, type);
}
else
#endif
{
hypre_CSRMatrixExtractDiagonalHost(A, d, type);
}
}
/* Scale CSR matrix A = scalar * A
*/
HYPRE_Int
hypre_CSRMatrixScale( hypre_CSRMatrix *A,
HYPRE_Complex scalar)
{
HYPRE_Complex *data = hypre_CSRMatrixData(A);
HYPRE_Int i;
HYPRE_Int k = hypre_CSRMatrixNumNonzeros(A);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypreDevice_Scalen(data, k, scalar);
}
else
#endif
{
for (i = 0; i < k; i++)
{
data[i] *= scalar;
}
}
return hypre_error_flag;
}
HYPRE_Int
hypre_CSRMatrixSetConstantValues( hypre_CSRMatrix *A,
HYPRE_Complex value)
{
HYPRE_Int i;
HYPRE_Int nnz = hypre_CSRMatrixNumNonzeros(A);
if (!hypre_CSRMatrixData(A))
{
hypre_CSRMatrixData(A) = hypre_TAlloc(HYPRE_Complex, nnz, hypre_CSRMatrixMemoryLocation(A));
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypreDevice_Filln(hypre_CSRMatrixData(A), nnz, value);
}
else
#endif
{
for (i = 0; i < nnz; i++)
{
hypre_CSRMatrixData(A)[i] = value;
}
}
return hypre_error_flag;
}
|
generator.c | #include "heads.h"
void matrix_generator(int nthreads) {
if (nthreads == 1) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
matrix[i][j] = (double)(rand() % (RANGE * 2) - RANGE) / SCALE;
}
}
}
else {
#pragma omp parallel for num_threads(nthreads)
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
matrix[i][j] = (double)(rand() % (RANGE * 2) - RANGE) / SCALE;
}
}
}
}
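/*
  Note: rand() is not required to be thread-safe, so calling it from the
  parallel loop above is a data race. A minimal reentrant sketch using the
  POSIX rand_r() with a per-thread seed (assumes <omp.h> is available via
  "heads.h" or directly; the seed choice is illustrative only):

  #pragma omp parallel num_threads(nthreads)
  {
      unsigned int seed = 42u + (unsigned int)omp_get_thread_num();
      #pragma omp for
      for (int i = 0; i < SIZE; i++)
          for (int j = 0; j < SIZE; j++)
              matrix[i][j] = (double)(rand_r(&seed) % (RANGE * 2) - RANGE) / SCALE;
  }
*/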
void vector_generator(int nthreads) {
if (nthreads == 1) {
for (int i = 0; i < SIZE; i++) {
vec[i][0] = (double)(rand() % (RANGE * 2) - RANGE) / SCALE;
}
}
else {
#pragma omp parallel for num_threads(nthreads)
for (int i = 0; i < SIZE; i++) {
vec[i][0] = (double)(rand() % (RANGE * 2) - RANGE) / SCALE;
}
}
} |
SpatialConvolutionMM.c | #include <string.h>
#ifdef USEQSML
#include <stdbool.h>
typedef struct qsml_info qsml_info;
#include <qsml.h>
#endif
#include "../thnets.h"
#ifndef USEQSML
static void nn_unfolded_copy(THFloatTensor *finput, THFloatTensor *input,
int kW, int kH, int dW, int dH, int padW, int padH,
int nInputPlane, int inputWidth, int inputHeight,
int outputWidth, int outputHeight)
{
long k;
float *input_data = THFloatTensor_data(input);
float *finput_data = THFloatTensor_data(finput);
#pragma omp parallel for private(k)
for(k = 0; k < nInputPlane*kH*kW; k++) {
long nip = k / (kH*kW);
long rest = k % (kH*kW);
long kh = rest / kW;
long kw = rest % kW;
long x,y;
long long ix,iy;
float *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
float *src = input_data + nip*(inputHeight*inputWidth);
if (padW > 0 || padH > 0) {
long lpad,rpad;
for(y = 0; y < outputHeight; y++) {
iy = (long long)(y*dH - padH + kh);
if (iy < 0 || iy >= inputHeight) {
memset(dst+y*outputWidth, 0, sizeof(float)*outputWidth);
} else {
if (dW==1){
ix = (long long)(0 - padW + kw);
lpad = thfmaxf(0,padW-kw);
rpad = thfmaxf(0,padW-(kW-kw-1));
if (outputWidth-rpad-lpad <= 0) {
memset(dst+(y*outputWidth), 0, sizeof(float)*outputWidth);
} else {
if (lpad > 0) memset(dst+y*outputWidth, 0, sizeof(float)*lpad);
memcpy(dst+(y*outputWidth+lpad), src+(iy*inputWidth+ix+lpad), sizeof(float)*(outputWidth-rpad-lpad));
if (rpad > 0) memset(dst+y*outputWidth + outputWidth - rpad, 0, sizeof(float)*rpad);
}
}
else{
for (x=0; x<outputWidth; x++){
ix = (long long)(x*dW - padW + kw);
if (ix < 0 || ix >= inputWidth)
memset(dst+(y*outputWidth+x), 0, sizeof(float)*1);
else
memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix), sizeof(float)*(1));
}
}
}
}
} else {
for(y = 0; y < outputHeight; y++) {
iy = (long long)(y*dH + kh);
ix = (long long)(0 + kw);
if (dW == 1)
memcpy(dst+(y*outputWidth), src+(iy*inputWidth+ix), sizeof(float)*outputWidth);
else{
for (x=0; x<outputWidth; x++)
memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix+x*dW), sizeof(float)*(1));
}
}
}
}
}
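/*
  Layout note (a summary of the routine above): nn_unfolded_copy performs the
  usual im2col expansion, writing finput as an (nInputPlane*kH*kW) x
  (outputHeight*outputWidth) matrix so that the convolution below reduces to a
  single matrix product, weight * finput, per image.
*/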
static void nn_SpatialConvolutionMM_updateOutput_frame(THFloatTensor *input, THFloatTensor *output,
THFloatTensor *weight, THFloatTensor *bias,
THFloatTensor *finput,
int kW, int kH, int dW, int dH, int padW, int padH,
long nInputPlane, long inputWidth, long inputHeight,
long nOutputPlane, long outputWidth, long outputHeight)
{
THFloatTensor *output2d = 0;
if(finput)
{
nn_unfolded_copy(finput, input, kW, kH, dW, dH, padW, padH,
(int)nInputPlane, (int)inputWidth, (int)inputHeight, (int)outputWidth, (int)outputHeight);
output2d = THFloatTensor_newWithStorage2d(output->storage, output->storageOffset,
nOutputPlane, -1, outputHeight*outputWidth, -1);
}
long i;
for (i = 0; i < nOutputPlane; i++)
{
float *data = output->storage->data + output->storageOffset + output->stride[0]*i;
float what = bias && bias->storage ? THFloatTensor_data(bias)[i] : 0;
long len = outputHeight*outputWidth;
THFloatVector_fill(data, what, len);
}
if(finput)
{
THFloatTensor_addmm(output2d, 1, output2d, 1, weight, finput);
THFloatTensor_free(output2d);
}
#ifndef USEBLAS
else THFloatTensor_convmm(output, 1, 1, weight, input, kH, kW, dH, dW, padH, padW);
#endif
}
#endif
#ifdef USEQSML
void applybias(struct module *newmod, int outP, int outW, int outH)
{
int i, wsize;
wsize = outW*outH;
float* biasdata = THFloatTensor_data(newmod->SpatialConvolution.bias);
float* outdata = THFloatTensor_data(newmod->output);
//float* biasdata = (float*) calloc(wsize*outP, sizeof(float));//one memcpy is better?
//memcpy(outdata, biasdata, wsize*outP*sizeof(float));//only saves 1ms
for(i=0; i<wsize; i++)
memcpy(outdata+i*outP, biasdata, outP*sizeof(float));
}
#endif
THFloatTensor *nn_SpatialConvolutionMM_updateOutput(struct module *module, THFloatTensor *input)
{
int kW = module->SpatialConvolution.kW;
int kH = module->SpatialConvolution.kH;
int dW = module->SpatialConvolution.dW;
int dH = module->SpatialConvolution.dH;
int padW = module->SpatialConvolution.padW;
int padH = module->SpatialConvolution.padH;
THFloatTensor *finput = module->SpatialConvolution.finput;//thnets [col,row,plane,batch]
#ifndef USEQSML
int batch = 1;
THFloatTensor *bias = module->SpatialConvolution.bias;
#endif
THFloatTensor *output = module->output;//[3col,2row,1plane,0batch]
THFloatTensor *weight = THFloatTensor_new();
THFloatTensor_set(weight, module->SpatialConvolution.weight);
THFloatTensor_resize2d(weight, weight->size[0], THFloatTensor_nElement(weight) / weight->size[0]);
if (input->nDimension == 3)
{
#ifndef USEQSML
batch = 0;
#endif
THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
}
long batchSize = input->size[0];
long nInputPlane = module->SpatialConvolution.nInputPlane;
long nOutputPlane = module->SpatialConvolution.nOutputPlane;
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
if(nInputPlane != input->size[1])
THError("nInputPlane %ld does not match input planes %ld", nInputPlane, input->size[1]);
if (outputWidth < 1 || outputHeight < 1)
THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
if(module->type == MT_SpatialConvolutionMM)
THFloatTensor_resize3d(finput, batchSize, kW*kH*nInputPlane, outputHeight*outputWidth);
THFloatTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth);
#ifdef USEQSML
qsml_int channel = nInputPlane;
qsml_int inH = inputHeight;
qsml_int inW = inputWidth;
qsml_int qkW = kW;
qsml_int qkH = kH;
qsml_int qdW = dW;
qsml_int qdH = dH;
qsml_int qpadW = padW;
qsml_int qpadH = padH;
qsml_int outP = nOutputPlane;
qsml_int outH = outputHeight;
qsml_int outW = outputWidth;
applybias(module,(int)outP,(int)outW,(int)outH);//doesn't add much overhead on average
float *image = THFloatTensor_data(input);//[plane, col, row]
float *filtro = THFloatTensor_data(weight);//[outplane, plane, col, row]
float *outf = THFloatTensor_data(output);//[plane, col, row]
sconv_mm(true, image, inW, inH, channel,
filtro, outP, qkW, qkH, qpadW, qpadH,
qdW, qdH, outf, outW, outH);
#else
long t;
#pragma omp parallel for if(batchSize >= 4) private(t)
for (t = 0; t < batchSize; t++) {
THFloatTensor *input_t = THFloatTensor_newSelect(input, 0, t);
THFloatTensor *output_t = THFloatTensor_newSelect(output, 0, t);
THFloatTensor *finput_t = module->type == MT_SpatialConvolutionMM ? THFloatTensor_newSelect(finput, 0, t) : 0;
nn_SpatialConvolutionMM_updateOutput_frame(input_t, output_t, weight, bias, finput_t,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
THFloatTensor_free(input_t);
THFloatTensor_free(output_t);
THFloatTensor_free(finput_t);
}
if (batch == 0) {
THFloatTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth);
THFloatTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
}
THFloatTensor_free(weight);
#endif
return output;
}
|
pi.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
This is based on the original sequential program. It uses the timer
from the OpenMP runtime library.
History: Written by Tim Mattson, 11/99.
Edited by Tan Chengsong, 7/2017: parallel version with padding.
This version: without padding, using a critical section to avoid
race conditions and cache sloshing.
*/
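/*
Midpoint (rectangle) rule used below:
pi = integral_0^1 4/(1+x^2) dx
~= sum_{i=1..N} step * 4/(1 + x_i^2), with x_i = (i - 0.5) * step
and step = 1/N. Each thread sums a contiguous block of i, and the
partial sums are combined under a critical section.
*/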
#include <stdio.h>
#include <omp.h>
#define NUM_THREADS 4
#define PADDING 8
static long num_steps = 100000000;
double step;
int main ()
{
double pi = 0;
double start_time, run_time;
step = 1.0/(double) num_steps;
start_time = omp_get_wtime();
int block_len = num_steps/NUM_THREADS;
#pragma omp parallel num_threads(NUM_THREADS)
{
int i;
double sum = 0.0;
int ID = omp_get_thread_num();
int start = ID * block_len;
int end = start + block_len;
double x;
for (i = start + 1; i <= end; i++){
x = (i-0.5)*step;
sum = sum + 4.0/(1.0+x*x);
}
#pragma omp critical
pi += step * sum;
}
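// Equivalent sketch using an OpenMP reduction instead of the critical
// section (assuming i and x are declared appropriately):
// #pragma omp parallel for reduction(+:pi) private(x)
// for (i = 1; i <= num_steps; i++) {
// x = (i - 0.5) * step;
// pi += 4.0 / (1.0 + x * x);
// }
// pi *= step;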
run_time = omp_get_wtime() - start_time;
printf("\n pi with %ld steps is %lf in %lf seconds\n ",num_steps,pi,run_time);
}
|
test_zgemm_nopack.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "test.h"
#include "flops.h"
#include "plasma.h"
#include "core_lapack.h"
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#define COMPLEX
/***************************************************************************//**
*
* @brief Tests ZGEMM.
*
* @param[in,out] param - array of parameters
* @param[in] run - whether to run test
*
* Sets used flags in param indicating parameters that are used.
* If run is true, also runs test and stores output parameters.
******************************************************************************/
void test_zgemm_nopack(param_value_t param[], bool run)
{
//================================================================
// Mark which parameters are used.
//================================================================
param[PARAM_TRANSA ].used = true;
param[PARAM_TRANSB ].used = true;
param[PARAM_DIM ].used = PARAM_USE_M | PARAM_USE_N | PARAM_USE_K;
param[PARAM_ALPHA ].used = true;
param[PARAM_BETA ].used = true;
param[PARAM_PADA ].used = true;
param[PARAM_PADB ].used = true;
param[PARAM_PADC ].used = true;
param[PARAM_NB ].used = true;
if (! run)
return;
//================================================================
// Set parameters.
//================================================================
plasma_enum_t transa = plasma_trans_const(param[PARAM_TRANSA].c);
plasma_enum_t transb = plasma_trans_const(param[PARAM_TRANSB].c);
int m = param[PARAM_DIM].dim.m;
int n = param[PARAM_DIM].dim.n;
int k = param[PARAM_DIM].dim.k;
int Am, An;
int Bm, Bn;
int Cm, Cn;
if (transa == PlasmaNoTrans) {
Am = m;
An = k;
}
else {
Am = k;
An = m;
}
if (transb == PlasmaNoTrans) {
Bm = k;
Bn = n;
}
else {
Bm = n;
Bn = k;
}
Cm = m;
Cn = n;
int lda = imax(1, Am + param[PARAM_PADA].i);
int ldb = imax(1, Bm + param[PARAM_PADB].i);
int ldc = imax(1, Cm + param[PARAM_PADC].i);
int test = param[PARAM_TEST].c == 'y';
double eps = LAPACKE_dlamch('E');
#ifdef COMPLEX
plasma_complex64_t alpha = param[PARAM_ALPHA].z;
plasma_complex64_t beta = param[PARAM_BETA].z;
#else
double alpha = creal(param[PARAM_ALPHA].z);
double beta = creal(param[PARAM_BETA].z);
#endif
//================================================================
// Set tuning parameters.
//================================================================
plasma_set(PlasmaTuning, PlasmaDisabled);
plasma_set(PlasmaNb, param[PARAM_NB].i);
//================================================================
// Allocate and initialize arrays.
//================================================================
plasma_complex64_t *A =
(plasma_complex64_t*)malloc((size_t)lda*An*sizeof(plasma_complex64_t));
assert(A != NULL);
plasma_complex64_t *B =
(plasma_complex64_t*)malloc((size_t)ldb*Bn*sizeof(plasma_complex64_t));
assert(B != NULL);
plasma_complex64_t *C =
(plasma_complex64_t*)malloc((size_t)ldc*Cn*sizeof(plasma_complex64_t));
assert(C != NULL);
int seed[] = {0, 0, 0, 1};
lapack_int retval;
retval = LAPACKE_zlarnv(1, seed, (size_t)lda*An, A);
assert(retval == 0);
retval = LAPACKE_zlarnv(1, seed, (size_t)ldb*Bn, B);
assert(retval == 0);
retval = LAPACKE_zlarnv(1, seed, (size_t)ldc*Cn, C);
assert(retval == 0);
plasma_complex64_t *Cref = NULL;
if (test) {
Cref = (plasma_complex64_t*)malloc(
(size_t)ldc*Cn*sizeof(plasma_complex64_t));
assert(Cref != NULL);
memcpy(Cref, C, (size_t)ldc*Cn*sizeof(plasma_complex64_t));
}
//================================================================
// Run and time PLASMA.
//================================================================
/* plasma_zgemm(
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc);
*/
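// The commented call above is replaced below by the inlined body of
// plasma_zgemm(): argument checks, tile-descriptor creation, and the
// asynchronous tile call. Inlining lets the host-to-tile translation
// (the "pack") run before the timer starts, so it is excluded from the
// measured time.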
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return; // void test driver: cannot return a status code
}
// Check input arguments.
if ((transa != PlasmaNoTrans) &&
(transa != PlasmaTrans) &&
(transa != PlasmaConjTrans)) {
plasma_error("illegal value of transa");
return;
}
if ((transb != PlasmaNoTrans) &&
(transb != PlasmaTrans) &&
(transb != PlasmaConjTrans)) {
plasma_error("illegal value of transb");
return;
}
if (m < 0) {
plasma_error("illegal value of m");
return;
}
if (n < 0) {
plasma_error("illegal value of n");
return;
}
if (k < 0) {
plasma_error("illegal value of k");
return;
}
int am, an;
int bm, bn;
if (transa == PlasmaNoTrans) {
am = m;
an = k;
}
else {
am = k;
an = m;
}
if (transb == PlasmaNoTrans) {
bm = k;
bn = n;
}
else {
bm = n;
bn = k;
}
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return;
}
if (ldb < imax(1, bm)) {
plasma_error("illegal value of ldb");
return;
}
if (ldc < imax(1, m)) {
plasma_error("illegal value of ldc");
return;
}
// quick return
if (m == 0 || n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
return;
// Tune parameters.
if (plasma->tuning)
plasma_tune_gemm(plasma, PlasmaComplexDouble, m, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t AA;
plasma_desc_t BB;
plasma_desc_t CC;
int retval1;
retval1 = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
am, an, 0, 0, am, an, &AA);
if (retval1 != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return;
}
retval1 = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
bm, bn, 0, 0, bm, bn, &BB);
if (retval1 != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&AA);
return;
}
retval1 = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
m, n, 0, 0, m, n, &CC);
if (retval1 != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&AA);
plasma_desc_destroy(&BB);
return;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval1 = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval1 = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(A, lda, AA, &sequence, &request);
plasma_omp_zge2desc(B, ldb, BB, &sequence, &request);
plasma_omp_zge2desc(C, ldc, CC, &sequence, &request);
}
plasma_time_t start = omp_get_wtime();
#pragma omp parallel
#pragma omp master
{
// Call the tile async function.
plasma_omp_zgemm(transa, transb,
alpha, AA,
BB,
beta, CC,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(CC, C, ldc, &sequence, &request);
}
// implicit synchronization
plasma_time_t stop = omp_get_wtime();
plasma_time_t time = stop-start;
param[PARAM_TIME].d = time;
param[PARAM_GFLOPS].d = flops_zgemm(m, n, k) / time / 1e9;
// Free matrices in tile layout.
plasma_desc_destroy(&AA);
plasma_desc_destroy(&BB);
plasma_desc_destroy(&CC);
//================================================================
// Test results by comparing to a reference implementation.
//================================================================
if (test) {
// |R - R_ref|_p < gamma_{k+2} * |alpha| * |A|_p * |B|_p +
// gamma_2 * |beta| * |C|_p
// holds component-wise or with |.|_p as 1, inf, or Frobenius norm.
// gamma_k = k*eps / (1 - k*eps), but we use
// gamma_k = sqrt(k)*eps as a statistical average case.
// Using 3*eps covers complex arithmetic.
// See Higham, Accuracy and Stability of Numerical Algorithms, ch 2-3.
double work[1];
double Anorm = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Am, An, A, lda, work);
double Bnorm = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Bm, Bn, B, ldb, work);
double Cnorm = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Cm, Cn, Cref, ldc, work);
cblas_zgemm(
CblasColMajor,
(CBLAS_TRANSPOSE)transa, (CBLAS_TRANSPOSE)transb,
m, n, k,
CBLAS_SADDR(alpha), A, lda,
B, ldb,
CBLAS_SADDR(beta), Cref, ldc);
plasma_complex64_t zmone = -1.0;
cblas_zaxpy((size_t)ldc*Cn, CBLAS_SADDR(zmone), Cref, 1, C, 1);
double error = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Cm, Cn, C, ldc, work);
double normalize = sqrt((double)k+2) * cabs(alpha) * Anorm * Bnorm
+ 2 * cabs(beta) * Cnorm;
if (normalize != 0)
error /= normalize;
param[PARAM_ERROR].d = error;
param[PARAM_SUCCESS].i = error < 3*eps;
}
//================================================================
// Free arrays.
//================================================================
free(A);
free(B);
free(C);
if (test)
free(Cref);
}
|
convolution_sgemm_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack8_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const __fp16* bias = _bias;
// permute
Mat tmp;
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 16u, 8, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 8, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 16u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 16u, 8, opt.workspace_allocator);
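// tmp packs the im2col columns in tiles of 12, then 8, 4, 2 and finally
// single columns; each tile is stored contiguously so the micro-kernels
// below can stream it with sequential loads.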
{
int nn_size = size / 12;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
__fp16* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
// transpose 12x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub %0, %0, #128 \n"
"uzp1 v20.8h, v0.8h, v4.8h \n" // 0
"uzp1 v21.8h, v16.8h, v1.8h \n" // 1
"uzp1 v22.8h, v5.8h, v17.8h \n" // 2
"uzp1 v23.8h, v2.8h, v6.8h \n" // 3
"uzp1 v24.8h, v18.8h, v3.8h \n" // 4
"uzp1 v25.8h, v7.8h, v19.8h \n" // 5
"uzp2 v26.8h, v0.8h, v4.8h \n" // 6
"uzp2 v27.8h, v16.8h, v1.8h \n" // 7
"uzp2 v28.8h, v5.8h, v17.8h \n" // 8
"uzp2 v29.8h, v2.8h, v6.8h \n" // 9
"uzp2 v30.8h, v18.8h, v3.8h \n" // 10
"uzp2 v31.8h, v7.8h, v19.8h \n" // 11
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
img0 += size * 8;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size * 8;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p * 8 : zeros;
int i = 0;
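// 12-wide micro-kernel: 12 output positions x 8 output channels are
// accumulated in registers v20-v31, initialized from the bias and
// updated with fmla over all inch*maxk input contributions.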
for (; i + 11 < size; i += 12)
{
const __fp16* tmpptr = tmp.channel(i / 12);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v20.8h}, [%8] \n"
"mov v21.16b, v20.16b \n"
"mov v22.16b, v20.16b \n"
"mov v23.16b, v20.16b \n"
"mov v24.16b, v20.16b \n"
"mov v25.16b, v20.16b \n"
"mov v26.16b, v20.16b \n"
"mov v27.16b, v20.16b \n"
"mov v28.16b, v20.16b \n"
"mov v29.16b, v20.16b \n"
"mov v30.16b, v20.16b \n"
"mov v31.16b, v20.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
"fmla v20.8h, v12.8h, v0.h[0] \n"
"fmla v21.8h, v12.8h, v0.h[1] \n"
"fmla v22.8h, v12.8h, v0.h[2] \n"
"fmla v23.8h, v12.8h, v0.h[3] \n"
"fmla v24.8h, v12.8h, v0.h[4] \n"
"fmla v25.8h, v12.8h, v0.h[5] \n"
"fmla v26.8h, v12.8h, v0.h[6] \n"
"fmla v27.8h, v12.8h, v0.h[7] \n"
"fmla v28.8h, v12.8h, v1.h[0] \n"
"fmla v29.8h, v12.8h, v1.h[1] \n"
"fmla v30.8h, v12.8h, v1.h[2] \n"
"fmla v31.8h, v12.8h, v1.h[3] \n"
"fmla v20.8h, v13.8h, v1.h[4] \n"
"fmla v21.8h, v13.8h, v1.h[5] \n"
"fmla v22.8h, v13.8h, v1.h[6] \n"
"fmla v23.8h, v13.8h, v1.h[7] \n"
"fmla v24.8h, v13.8h, v2.h[0] \n"
"fmla v25.8h, v13.8h, v2.h[1] \n"
"fmla v26.8h, v13.8h, v2.h[2] \n"
"fmla v27.8h, v13.8h, v2.h[3] \n"
"fmla v28.8h, v13.8h, v2.h[4] \n"
"fmla v29.8h, v13.8h, v2.h[5] \n"
"fmla v30.8h, v13.8h, v2.h[6] \n"
"fmla v31.8h, v13.8h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v20.8h, v14.8h, v3.h[0] \n"
"fmla v21.8h, v14.8h, v3.h[1] \n"
"fmla v22.8h, v14.8h, v3.h[2] \n"
"fmla v23.8h, v14.8h, v3.h[3] \n"
"fmla v24.8h, v14.8h, v3.h[4] \n"
"fmla v25.8h, v14.8h, v3.h[5] \n"
"fmla v26.8h, v14.8h, v3.h[6] \n"
"fmla v27.8h, v14.8h, v3.h[7] \n"
"fmla v28.8h, v14.8h, v4.h[0] \n"
"fmla v29.8h, v14.8h, v4.h[1] \n"
"fmla v30.8h, v14.8h, v4.h[2] \n"
"fmla v31.8h, v14.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
"fmla v20.8h, v15.8h, v4.h[4] \n"
"fmla v21.8h, v15.8h, v4.h[5] \n"
"fmla v22.8h, v15.8h, v4.h[6] \n"
"fmla v23.8h, v15.8h, v4.h[7] \n"
"fmla v24.8h, v15.8h, v5.h[0] \n"
"fmla v25.8h, v15.8h, v5.h[1] \n"
"fmla v26.8h, v15.8h, v5.h[2] \n"
"fmla v27.8h, v15.8h, v5.h[3] \n"
"fmla v28.8h, v15.8h, v5.h[4] \n"
"fmla v29.8h, v15.8h, v5.h[5] \n"
"fmla v30.8h, v15.8h, v5.h[6] \n"
"fmla v31.8h, v15.8h, v5.h[7] \n"
"fmla v20.8h, v16.8h, v6.h[0] \n"
"fmla v21.8h, v16.8h, v6.h[1] \n"
"fmla v22.8h, v16.8h, v6.h[2] \n"
"fmla v23.8h, v16.8h, v6.h[3] \n"
"fmla v24.8h, v16.8h, v6.h[4] \n"
"fmla v25.8h, v16.8h, v6.h[5] \n"
"fmla v26.8h, v16.8h, v6.h[6] \n"
"fmla v27.8h, v16.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v7.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[1] \n"
"fmla v30.8h, v16.8h, v7.h[2] \n"
"fmla v31.8h, v16.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
"fmla v20.8h, v17.8h, v7.h[4] \n"
"fmla v21.8h, v17.8h, v7.h[5] \n"
"fmla v22.8h, v17.8h, v7.h[6] \n"
"fmla v23.8h, v17.8h, v7.h[7] \n"
"fmla v24.8h, v17.8h, v8.h[0] \n"
"fmla v25.8h, v17.8h, v8.h[1] \n"
"fmla v26.8h, v17.8h, v8.h[2] \n"
"fmla v27.8h, v17.8h, v8.h[3] \n"
"fmla v28.8h, v17.8h, v8.h[4] \n"
"fmla v29.8h, v17.8h, v8.h[5] \n"
"fmla v30.8h, v17.8h, v8.h[6] \n"
"fmla v31.8h, v17.8h, v8.h[7] \n"
"fmla v20.8h, v18.8h, v9.h[0] \n"
"fmla v21.8h, v18.8h, v9.h[1] \n"
"fmla v22.8h, v18.8h, v9.h[2] \n"
"fmla v23.8h, v18.8h, v9.h[3] \n"
"fmla v24.8h, v18.8h, v9.h[4] \n"
"fmla v25.8h, v18.8h, v9.h[5] \n"
"fmla v26.8h, v18.8h, v9.h[6] \n"
"fmla v27.8h, v18.8h, v9.h[7] \n"
"fmla v28.8h, v18.8h, v10.h[0] \n"
"fmla v29.8h, v18.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.8h, v19.8h, v10.h[4] \n"
"fmla v21.8h, v19.8h, v10.h[5] \n"
"fmla v22.8h, v19.8h, v10.h[6] \n"
"fmla v23.8h, v19.8h, v10.h[7] \n"
"fmla v24.8h, v19.8h, v11.h[0] \n"
"fmla v25.8h, v19.8h, v11.h[1] \n"
"fmla v26.8h, v19.8h, v11.h[2] \n"
"fmla v27.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v19.8h, v11.h[4] \n"
"fmla v29.8h, v19.8h, v11.h[5] \n"
"fmla v30.8h, v19.8h, v11.h[6] \n"
"fmla v31.8h, v19.8h, v11.h[7] \n"
"bne 0b \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"mov v18.16b, v16.16b \n"
"mov v19.16b, v16.16b \n"
"mov v20.16b, v16.16b \n"
"mov v21.16b, v16.16b \n"
"mov v22.16b, v16.16b \n"
"mov v23.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"fmla v16.8h, v12.8h, v4.h[0] \n"
"fmla v17.8h, v12.8h, v4.h[1] \n"
"fmla v18.8h, v12.8h, v4.h[2] \n"
"fmla v19.8h, v12.8h, v4.h[3] \n"
"fmla v20.8h, v12.8h, v4.h[4] \n"
"fmla v21.8h, v12.8h, v4.h[5] \n"
"fmla v22.8h, v12.8h, v4.h[6] \n"
"fmla v23.8h, v12.8h, v4.h[7] \n"
"fmla v16.8h, v13.8h, v5.h[0] \n"
"fmla v17.8h, v13.8h, v5.h[1] \n"
"fmla v18.8h, v13.8h, v5.h[2] \n"
"fmla v19.8h, v13.8h, v5.h[3] \n"
"fmla v20.8h, v13.8h, v5.h[4] \n"
"fmla v21.8h, v13.8h, v5.h[5] \n"
"fmla v22.8h, v13.8h, v5.h[6] \n"
"fmla v23.8h, v13.8h, v5.h[7] \n"
"fmla v16.8h, v14.8h, v6.h[0] \n"
"fmla v17.8h, v14.8h, v6.h[1] \n"
"fmla v18.8h, v14.8h, v6.h[2] \n"
"fmla v19.8h, v14.8h, v6.h[3] \n"
"fmla v20.8h, v14.8h, v6.h[4] \n"
"fmla v21.8h, v14.8h, v6.h[5] \n"
"fmla v22.8h, v14.8h, v6.h[6] \n"
"fmla v23.8h, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v7.h[0] \n"
"fmla v17.8h, v15.8h, v7.h[1] \n"
"fmla v18.8h, v15.8h, v7.h[2] \n"
"fmla v19.8h, v15.8h, v7.h[3] \n"
"fmla v20.8h, v15.8h, v7.h[4] \n"
"fmla v21.8h, v15.8h, v7.h[5] \n"
"fmla v22.8h, v15.8h, v7.h[6] \n"
"fmla v23.8h, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 3 < size; i += 4)
{
const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"mov v18.16b, v16.16b \n"
"mov v19.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v18.8h, v8.8h, v2.h[0] \n"
"fmla v19.8h, v8.8h, v3.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v2.h[1] \n"
"fmla v19.8h, v9.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v3.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v18.8h, v11.8h, v2.h[3] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v18.8h, v12.8h, v2.h[4] \n"
"fmla v19.8h, v12.8h, v3.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v18.8h, v13.8h, v2.h[5] \n"
"fmla v19.8h, v13.8h, v3.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"fmla v18.8h, v14.8h, v2.h[6] \n"
"fmla v19.8h, v14.8h, v3.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"fmla v18.8h, v15.8h, v2.h[7] \n"
"fmla v19.8h, v15.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i + 1 < size; i += 2)
{
const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
for (; i < size; i++)
{
const __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8h}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v16.8h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack8_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 8b-8a-maxk-inch/8a-outch/8b
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(64 * maxk, inch / 8, outch / 8, (size_t)2u);
for (int q = 0; q + 7 < outch; q += 8)
{
__fp16* g00 = kernel_tm.channel(q / 8);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 8; i++)
{
for (int j = 0; j < 8; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = (__fp16)k00[k];
g00++;
}
}
}
}
}
}
static void convolution_im2col_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator);
{
const int gap = (w * stride_h - outw * stride_w) * 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
__fp16* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 8;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x8_t _val0 = vld1q_f16(sptr);
float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8);
float16x8_t _val2 = vld1q_f16(sptr + stride_w * 16);
float16x8_t _val3 = vld1q_f16(sptr + stride_w * 24);
vst1q_f16(ptr, _val0);
vst1q_f16(ptr + 8, _val1);
vst1q_f16(ptr + 16, _val2);
vst1q_f16(ptr + 24, _val3);
sptr += stride_w * 32;
ptr += 32;
}
for (; j + 1 < outw; j += 2)
{
float16x8_t _val0 = vld1q_f16(sptr);
float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8);
vst1q_f16(ptr, _val0);
vst1q_f16(ptr + 8, _val1);
sptr += stride_w * 16;
ptr += 16;
}
for (; j < outw; j++)
{
float16x8_t _val = vld1q_f16(sptr);
vst1q_f16(ptr, _val);
sptr += stride_w * 8;
ptr += 8;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
dorgqr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zungqr.c, normal z -> d, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_ungqr
*
* Generates an m-by-n matrix Q with orthonormal columns, which
* is defined as the first n columns of a product of the elementary reflectors
* returned by plasma_dgeqrf.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix Q. m >= 0.
*
* @param[in] n
* The number of columns of the matrix Q. m >= n >= 0.
*
* @param[in] k
* The number of columns of elementary tile reflectors whose product
* defines the matrix Q.
* n >= k >= 0.
*
* @param[in] pA
* Details of the QR factorization of the original matrix A as returned
* by plasma_dgeqrf, where the k first columns are the reflectors.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_dgeqrf.
*
* @param[out] pQ
* On exit, pointer to the m-by-n matrix Q.
*
* @param[in] ldq
* The leading dimension of the array Q. ldq >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_dorgqr
* @sa plasma_cungqr
* @sa plasma_dorgqr
* @sa plasma_sorgqr
* @sa plasma_dgeqrf
*
******************************************************************************/
int plasma_dorgqr(int m, int n, int k,
double *pA, int lda,
plasma_desc_t T,
double *pQ, int ldq)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (m < 0) {
plasma_error("illegal value of m");
return -1;
}
if (n < 0 || n > m) {
plasma_error("illegal value of n");
return -2;
}
if (k < 0 || k > n) {
plasma_error("illegal value of k");
return -3;
}
if (lda < imax(1, m)) {
plasma_error("illegal value of lda");
return -5;
}
if (ldq < imax(1, m)) {
plasma_error("illegal value of ldq");
return -8;
}
// quick return
if (n <= 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_geqrf(plasma, PlasmaRealDouble, m, n);
// Set tiling parameters.
int ib = plasma->ib;
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t Q;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, k, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, k, &Q);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Allocate workspace.
plasma_workspace_t work;
size_t lwork = ib*nb; // unmqr: work
retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
if (retval != PlasmaSuccess) {
plasma_error("plasma_workspace_create() failed");
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pQ, ldq, Q, &sequence, &request);
// Call the tile async function.
plasma_omp_dorgqr(A, T, Q, work, &sequence, &request);
// Translate Q back to LAPACK layout.
plasma_omp_ddesc2ge(Q, pQ, ldq, &sequence, &request);
}
// implicit synchronization
plasma_workspace_destroy(&work);
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&Q);
// Return status.
int status = sequence.status;
return status;
}
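/*
Minimal usage sketch (assumes PLASMA initialized via plasma_init() and
that plasma_dgeqrf() has this signature; A, Q, T are placeholder names):

plasma_desc_t T;
plasma_dgeqrf(m, n, A, lda, &T); // QR factorization, fills T
plasma_dorgqr(m, n, k, A, lda, T, Q, ldq); // form first n columns of Q
plasma_desc_destroy(&T);
*/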
/***************************************************************************//**
*
* @ingroup plasma_ungqr
*
* Non-blocking tile version of plasma_dorgqr().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_dgeqrf.
*
* @param[out] Q
* Descriptor of matrix Q. On exit, matrix Q stored in the tile layout.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dorgqr
* @sa plasma_omp_cungqr
* @sa plasma_omp_dorgqr
* @sa plasma_omp_sorgqr
* @sa plasma_omp_dgeqrf
*
******************************************************************************/
void plasma_omp_dorgqr(plasma_desc_t A, plasma_desc_t T, plasma_desc_t Q,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(T) != PlasmaSuccess) {
plasma_error("invalid T");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(Q) != PlasmaSuccess) {
plasma_error("invalid Q");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (Q.n <= 0)
return;
// Set Q to identity.
plasma_pdlaset(PlasmaGeneral, 0.0, 1.0, Q, sequence, request);
// Construct Q.
if (plasma->householder_mode == PlasmaTreeHouseholder) {
plasma_pdorgqr_tree(A, T, Q, work, sequence, request);
}
else {
plasma_pdorgqr(A, T, Q, work, sequence, request);
}
}
|
utils.c | #include "utils.h"
#include <complex.h>
#include <math.h>
#include <mkl.h>
#include <mkl_types.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define PI 3.14159265358979323846
#define CCONST 0.262465831
#define OPSIZE 9
void affine_transform(double *out, double *op, double *inv) {
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
// 12 13 14 15
double in[4] = {inv[0], inv[1], inv[2], 1};
out[0] = op[0] * in[0] + op[1] * in[1] + op[2] * in[2] + op[3] * in[3];
out[1] = op[4] * in[0] + op[5] * in[1] + op[6] * in[2] + op[7] * in[3];
out[2] = op[8] * in[0] + op[9] * in[1] + op[10] * in[2] + op[11] * in[3];
}
void rotation_transform(double *out, double *op, double *inv) {
double in[3] = {inv[0], inv[1], inv[2]};
out[0] = op[0] * in[0] + op[1] * in[1] + op[2] * in[2];
out[1] = op[3] * in[0] + op[4] * in[1] + op[5] * in[2];
out[2] = op[6] * in[0] + op[7] * in[1] + op[8] * in[2];
}
void vcross(double *res, double *top, double *bottom) {
res[0] = top[1] * bottom[2] - top[2] * bottom[1];
res[1] = top[2] * bottom[0] - top[0] * bottom[2];
res[2] = top[0] * bottom[1] - top[1] * bottom[0];
}
double dot(double *x1, double *x2) {
return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2];
}
double mag(double *x1) { return pow(dot(x1, x1), 0.5); }
double determinant(double *m) {
return m[0] * m[4] * m[8] + m[1] * m[5] * m[6] + m[2] * m[3] * m[7] -
m[2] * m[4] * m[6] - m[1] * m[3] * m[8] - m[0] * m[5] * m[7];
}
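// Minimum-image search: tries all 27 periodic images of coord (offsets
// -1, 0, +1 along each fractional axis) and keeps the Cartesian path
// with the smallest length, written to path and r.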
void min_cart_path(double *coord, double *center, double *lattice, double *path,
double *r) {
*r = INFINITY;
double testvec[3] = {0, 0, 0};
double testdist;
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
for (int k = -1; k <= 1; k++) {
testvec[0] = coord[0] + i - center[0];
testvec[1] = coord[1] + j - center[1];
testvec[2] = coord[2] + k - center[2];
frac_to_cartesian(testvec, lattice);
testdist = mag(testvec);
if (testdist < *r) {
path[0] = testvec[0];
path[1] = testvec[1];
path[2] = testvec[2];
*r = testdist;
}
}
}
}
}
void frac_from_spherical(double *ion_frac, double r, double theta, double phi,
double *lattice, double *reclattice, double *result) {
double cart[3];
cart[0] = r * sin(theta) * cos(phi);
cart[1] = r * sin(theta) * sin(phi);
cart[2] = r * cos(theta);
cartesian_to_frac(cart, reclattice);
result[0] = fmod(cart[0] + ion_frac[0], 1.00);
result[1] = fmod(cart[1] + ion_frac[1], 1.00);
result[2] = fmod(cart[2] + ion_frac[2], 1.00);
if (result[0] < 0)
result[0] += 1;
if (result[1] < 0)
result[1] += 1;
if (result[2] < 0)
result[2] += 1;
}
void trilinear_interpolate_values(double complex *x, double *frac, int *fftg,
double complex *values) {
// values: c000, c001, c010, c011, c100, c101, c110, c111
int i = (int)(frac[0] * fftg[0]);
int j = (int)(frac[1] * fftg[1]);
int k = (int)(frac[2] * fftg[2]);
int ip = (i + 1) % fftg[0];
int jp = (j + 1) % fftg[1];
int kp = (k + 1) % fftg[2];
int ind[8];
ind[0] = i * fftg[1] * fftg[2] + j * fftg[2] + k;
ind[1] = i * fftg[1] * fftg[2] + j * fftg[2] + kp;
ind[2] = i * fftg[1] * fftg[2] + jp * fftg[2] + k;
ind[3] = i * fftg[1] * fftg[2] + jp * fftg[2] + kp;
ind[4] = ip * fftg[1] * fftg[2] + j * fftg[2] + k;
ind[5] = ip * fftg[1] * fftg[2] + j * fftg[2] + kp;
ind[6] = ip * fftg[1] * fftg[2] + jp * fftg[2] + k;
ind[7] = ip * fftg[1] * fftg[2] + jp * fftg[2] + kp;
for (int n = 0; n < 8; n++)
values[n] = x[ind[n]];
}
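// Standard trilinear interpolation: blend the 8 corner values first
// along x (d[0]), then y (d[1]), then z (d[2]), where d[] are the
// fractional offsets of the point inside its FFT-grid cell.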
double complex trilinear_interpolate(double complex *c, double *frac,
int *fftg) {
// values: c000, c001, c010, c011, c100, c101, c110, c111
double d[3];
d[0] = fmod(frac[0] * fftg[0], 1.0);
d[1] = fmod(frac[1] * fftg[1], 1.0);
d[2] = fmod(frac[2] * fftg[2], 1.0);
// printf("%lf %lf %lf\n", d[0], d[1], d[2]);
double complex c00 = c[0] * (1 - d[0]) + c[4] * d[0];
double complex c01 = c[1] * (1 - d[0]) + c[5] * d[0];
double complex c10 = c[2] * (1 - d[0]) + c[6] * d[0];
double complex c11 = c[3] * (1 - d[0]) + c[7] * d[0];
double complex c0 = c00 * (1 - d[1]) + c10 * d[1];
double complex c1 = c01 * (1 - d[1]) + c11 * d[1];
return c0 * (1 - d[2]) + c1 * d[2];
}
double dist_from_frac(double *coords1, double *coords2, double *lattice) {
double f1 =
fmin(fabs(coords1[0] - coords2[0]), 1 - fabs(coords1[0] - coords2[0]));
double f2 =
fmin(fabs(coords1[1] - coords2[1]), 1 - fabs(coords1[1] - coords2[1]));
double f3 =
fmin(fabs(coords1[2] - coords2[2]), 1 - fabs(coords1[2] - coords2[2]));
return pow(pow(f1 * lattice[0] + f2 * lattice[3] + f3 * lattice[6], 2) +
pow(f1 * lattice[1] + f2 * lattice[4] + f3 * lattice[7], 2) +
pow(f1 * lattice[2] + f2 * lattice[5] + f3 * lattice[8], 2),
0.5);
}
void frac_to_cartesian(double *coord, double *lattice) {
double temp[3] = {0, 0, 0};
temp[0] =
coord[0] * lattice[0] + coord[1] * lattice[3] + coord[2] * lattice[6];
temp[1] =
coord[0] * lattice[1] + coord[1] * lattice[4] + coord[2] * lattice[7];
temp[2] =
coord[0] * lattice[2] + coord[1] * lattice[5] + coord[2] * lattice[8];
coord[0] = temp[0];
coord[1] = temp[1];
coord[2] = temp[2];
}
void cartesian_to_frac(double *coord, double *reclattice) {
double temp[3] = {0, 0, 0};
temp[0] = coord[0] * reclattice[0] + coord[1] * reclattice[1] +
coord[2] * reclattice[2];
temp[1] = coord[0] * reclattice[3] + coord[1] * reclattice[4] +
coord[2] * reclattice[5];
temp[2] = coord[0] * reclattice[6] + coord[1] * reclattice[7] +
coord[2] * reclattice[8];
coord[0] = temp[0] / 2 / PI;
coord[1] = temp[1] / 2 / PI;
coord[2] = temp[2] / 2 / PI;
}
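// Note: the division by 2*pi suggests reclattice stores the reciprocal
// vectors b_i with a_i . b_j = 2*pi*delta_ij (assumed convention), so the
// matrix product alone yields 2*pi times the fractional coordinates.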
void free_rayleigh_set_list(rayleigh_set_t *sets, int num_projs) {
for (int i = 0; i < num_projs; i++)
free(sets[i].terms);
free(sets);
}
void free_projection_list(projection_t *projlist, int num) {
for (int i = 0; i < num; i++) {
free(projlist[i].ms);
free(projlist[i].ls);
free(projlist[i].ns);
free(projlist[i].overlaps);
}
free(projlist);
}
void clean_wave_projections(pswf_t *wf) {
for (int i = 0; i < wf->nwk * wf->nspin; i++) {
kpoint_t *kpt = wf->kpts[i];
for (int b = 0; b < kpt->num_bands; b++) {
if (kpt->bands[b]->wave_projections != NULL) {
free_projection_list(kpt->bands[b]->wave_projections, wf->wp_num);
kpt->bands[b]->wave_projections = NULL;
}
}
}
}
void free_kpoint(kpoint_t *kpt, int num_elems, int num_sites, int wp_num,
int *num_projs) {
for (int b = 0; b < kpt->num_bands; b++) {
band_t *curr_band = kpt->bands[b];
free(curr_band->Cs);
if (curr_band->projections != NULL) {
free_projection_list(curr_band->projections, num_sites);
}
if (curr_band->wave_projections != NULL) {
free_projection_list(curr_band->wave_projections, wp_num);
}
if (curr_band->up_projections != NULL) {
free_projection_list(curr_band->up_projections, num_sites);
}
if (curr_band->down_projections != NULL) {
free_projection_list(curr_band->down_projections, num_sites);
}
if (curr_band->CRs != NULL) {
mkl_free(curr_band->CRs);
}
if (curr_band->CAs != NULL) {
mkl_free(curr_band->CAs);
}
free(curr_band);
}
if (kpt->expansion != NULL) {
for (int i = 0; i < num_elems; i++)
free_rayleigh_set_list(kpt->expansion[i], num_projs[i]);
free(kpt->expansion);
}
free(kpt->Gs);
free(kpt->bands);
free(kpt->k);
free(kpt);
}
void free_ppot(ppot_t *pp) {
for (int i = 0; i < pp->num_projs; i++) {
free(pp->funcs[i].proj);
free(pp->funcs[i].pswave);
free(pp->funcs[i].aewave);
free(pp->funcs[i].diffwave);
free(pp->funcs[i].kwave);
free(pp->funcs[i].smooth_diffwave);
for (int j = 0; j < 3; j++) {
free(pp->funcs[i].proj_spline[j]);
free(pp->funcs[i].aewave_spline[j]);
free(pp->funcs[i].pswave_spline[j]);
free(pp->funcs[i].diffwave_spline[j]);
free(pp->funcs[i].kwave_spline[j]);
free(pp->funcs[i].smooth_diffwave_spline[j]);
}
free(pp->funcs[i].proj_spline);
free(pp->funcs[i].aewave_spline);
free(pp->funcs[i].pswave_spline);
free(pp->funcs[i].diffwave_spline);
free(pp->funcs[i].kwave_spline);
free(pp->funcs[i].smooth_diffwave_spline);
}
free(pp->funcs);
free(pp->wave_grid);
free(pp->kwave_grid);
free(pp->proj_grid);
free(pp->smooth_grid);
free(pp->pspw_overlap_matrix);
free(pp->aepw_overlap_matrix);
free(pp->diff_overlap_matrix);
}
void free_real_proj(real_proj_t *proj) { free(proj->values); }
void free_pswf(pswf_t *wf) {
for (int i = 0; i < wf->nwk * wf->nspin; i++)
free_kpoint(wf->kpts[i], wf->num_elems, wf->num_sites, wf->wp_num,
wf->num_projs);
if (wf->overlaps != NULL) {
for (int i = 0; i < wf->num_aug_overlap_sites; i++)
free(wf->overlaps[i]);
free(wf->overlaps);
}
if (wf->num_projs != NULL) {
free(wf->num_projs);
}
free(wf->kpts);
free(wf->G_bounds);
free(wf->lattice);
free(wf->reclattice);
if (wf->pps != NULL) {
free_ppot_list(wf->pps, wf->num_elems);
}
if (wf->dcoords != NULL) {
free(wf->dcoords);
}
if (wf->fftg != NULL) {
free(wf->fftg);
}
free(wf);
}
void free_real_proj_site(real_proj_site_t *site) {
for (int i = 0; i < site->total_projs; i++) {
free_real_proj(site->projs + i);
}
free(site->projs);
free(site->indices);
free(site->coord);
free(site->paths);
}
void free_ptr(void *ptr) { free(ptr); }
void free_real_proj_site_list(real_proj_site_t *sites, int length) {
for (int i = 0; i < length; i++) {
free_real_proj_site(sites + i);
}
free(sites);
}
void free_ppot_list(ppot_t *pps, int length) {
for (int i = 0; i < length; i++) {
free_ppot(pps + i);
}
free(pps);
}
int min(int a, int b) {
if (a > b)
return b;
else
return a;
}
int max(int a, int b) {
if (a < b)
return b;
else
return a;
}
double *get_occs(pswf_t *wf) {
kpoint_t **kpts = wf->kpts;
double *occs =
(double *)malloc(wf->nwk * wf->nband * wf->nspin * sizeof(double));
CHECK_ALLOCATION(occs);
int NUM_KPTS = wf->nwk * wf->nspin;
for (int kpt_num = 0; kpt_num < NUM_KPTS; kpt_num++) {
for (int band_num = 0; band_num < wf->nband; band_num++) {
occs[band_num * NUM_KPTS + kpt_num] = kpts[kpt_num]->bands[band_num]->occ;
}
}
return occs;
}
int get_nband(pswf_t *wf) { return wf->nband; }
int get_nwk(pswf_t *wf) { return wf->nwk; }
int get_nspin(pswf_t *wf) { return wf->nspin; }
double get_encut(pswf_t *wf) { return wf->encut; }
int is_ncl(pswf_t *wf) { return wf->is_ncl; }
double get_energy(pswf_t *wf, int band, int kpt, int spin) {
return wf->kpts[kpt + spin * wf->nwk]->bands[band]->energy;
}
double get_occ(pswf_t *wf, int band, int kpt, int spin) {
return wf->kpts[kpt + spin * wf->nwk]->bands[band]->occ;
}
void set_num_sites(pswf_t *wf, int nsites) { wf->num_sites = nsites; }
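// Associated Legendre polynomial P_l^m(x), evaluated from the explicit
// Rodrigues-expansion sum; the m < 0 case uses the standard identity
// P_l^{-m}(x) = (-1)^m (l-m)!/(l+m)! P_l^m(x).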
double legendre(int l, int m, double x) {
double total = 0;
if (m < 0)
return pow(-1.0, m) * fac(l + m) / fac(l - m) * legendre(l, -m, x);
for (int n = l; n >= 0 && 2 * n - l - m >= 0; n--) {
total += pow(x, 2 * n - l - m) * fac(2 * n) / fac(2 * n - l - m) / fac(n) /
fac(l - n) * pow(-1, l - n);
}
return total * pow(-1, m) * pow(1 - x * x, m / 2.0) / pow(2, l);
}
void legendre_coeff(double *ptr, int l, int m) {
// assumes ptr is cleared
double prefac = pow(-1, m) / pow(2, l);
if (m < 0) {
prefac *= pow(-1.0, m) * fac(l + m) / fac(l - m);
}
m = abs(m);
for (int n = l; n >= 0 && 2 * n - l - m >= 0; n--) {
ptr[n] = fac(2 * n) / fac(2 * n - l - m) / fac(n) / fac(l - n) *
pow(-1, l - n) * prefac;
}
}
double *legendre_product(int l1, int l2, int m1, int m2) {
int m = m2 - m1;
int maxl = l1 + l2;
double *lp1 = (double *)calloc((l1 + 1), sizeof(double));
double *lp2 = (double *)calloc((l2 + 1), sizeof(double));
double *polynomial = (double *)calloc((maxl + 1), sizeof(double));
double *test = (double *)calloc((maxl + 1), sizeof(double));
double *coeff = (double *)calloc((maxl + 1), sizeof(double));
legendre_coeff(lp1, l1, m1);
legendre_coeff(lp2, l2, m2);
for (int n1 = 0; n1 <= l1; n1++) {
for (int n2 = 0; n2 <= l2; n2++) {
polynomial[n1 + n2] += lp1[n1] * lp2[n2];
}
}
for (int l = maxl; l >= abs(m); l--) {
legendre_coeff(test, l, m);
coeff[l] = polynomial[l] / test[l];
for (int lp = abs(m); lp <= maxl; lp++) {
polynomial[lp] -= coeff[l] * test[lp];
test[lp] = 0;
}
}
free(lp1);
free(lp2);
free(polynomial);
free(test);
return coeff;
}
double fac(int n) {
// accumulate in double: an int accumulator overflows already at n = 13,
// and legendre() calls fac(2 * n) for moderately large l
double t = 1;
int m = 1;
while (m <= n) {
t *= m;
m++;
}
return t;
}
double complex Ylm(int l, int m, double theta, double phi) {
// printf("%lf %lf %lf\n", pow((2*l+1)/(4*PI)*fac(l-m)/fac(l+m), 0.5),
// legendre(l, m, cos(theta)), creal(cexp(I*m*phi))); double complex
// multiplier = 0; if (m == 0) multiplier = 1; else if (m < 0) multiplier =
// pow(2.0, 0.5)
// * cos(-m*phi); else multiplier = pow(-1,m) * pow(2.0, 0.5) * sin(m*phi);
return pow((2 * l + 1) / (4 * PI) * fac(l - m) / fac(l + m), 0.5) *
legendre(l, m, cos(theta)) * cexp(I * m * phi);
}
double complex Ylm2(int l, int m, double costheta, double phi) {
// printf("%lf %lf %lf\n", pow((2*l+1)/(4*PI)*fac(l-m)/fac(l+m), 0.5),
// legendre(l, m, cos(theta)), creal(cexp(I*m*phi))); double complex
// multiplier = 0; if (m == 0) multiplier = 1; else if (m < 0) multiplier =
// pow(2.0, 0.5)
// * cos(-m*phi); else multiplier = pow(-1,m) * pow(2.0, 0.5) * sin(m*phi);
return pow((2 * l + 1) / (4 * PI) * fac(l - m) / fac(l + m), 0.5) *
legendre(l, m, costheta) * cexp(I * m * phi);
}
double proj_interpolate(double r, double rmax, int size, double *x,
double *proj, double **proj_spline) {
if (r > x[size - 1])
return 0;
if (r < x[0])
return proj[0];
int ind = min((int)(r / rmax * size), size - 2);
double rem = r - x[ind];
double radval =
proj[ind] +
rem * (proj_spline[0][ind] +
rem * (proj_spline[1][ind] + rem * proj_spline[2][ind]));
return radval;
}
double wave_interpolate(double r, int size, double *x, double *f,
double **wave_spline) {
if (r > x[size - 1])
return 0;
if (r < x[0])
return f[0];
int ind = min((int)(log(r / x[0]) / log(x[1] / x[0])), size - 2);
double rem = r - x[ind];
return f[ind] +
rem * (wave_spline[0][ind] +
rem * (wave_spline[1][ind] + rem * wave_spline[2][ind]));
}
double complex wave_value(funcset_t funcs, int size, double *x, int m,
double *ion_pos, double *pos, double *lattice) {
double temp[3] = {0, 0, 0};
double r = 0;
min_cart_path(pos, ion_pos, lattice, temp, &r);
double ae_radial_val =
wave_interpolate(r, size, x, funcs.aewave, funcs.aewave_spline);
double ps_radial_val =
wave_interpolate(r, size, x, funcs.pswave, funcs.pswave_spline);
double radial_val = (ae_radial_val - ps_radial_val);
if (r < x[0])
radial_val /= x[0];
else
radial_val /= r;
if (r == 0)
return Ylm(funcs.l, m, 0, 0) * radial_val;
double theta = 0, phi = 0;
theta = acos(temp[2] / r);
if (r - fabs(temp[2]) == 0)
phi = 0;
else
phi = acos(temp[0] / pow(temp[0] * temp[0] + temp[1] * temp[1], 0.5));
if (temp[1] < 0)
phi = 2 * PI - phi;
double complex sph_val = Ylm(funcs.l, m, theta, phi);
return radial_val * sph_val;
}
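// Interpretive note: the division by r (clamped to x[0] near the origin)
// suggests aewave/pswave are stored as r * phi(r) on the radial grid, so
// wave_value evaluates the all-electron-minus-pseudo partial-wave
// difference (phi_ae - phi_ps) * Y_lm at the minimum-image displacement
// of pos from ion_pos.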
double complex wave_value2(double *x, double *wave, double **spline, int size,
int l, int m, double *pos) {
double r = mag(pos);
double radial_val = wave_interpolate(r, size, x, wave, spline);
if (r < x[0])
radial_val /= x[0];
else
radial_val /= r;
if (r == 0)
return Ylm(l, m, 0, 0) * radial_val;
double theta = 0, phi = 0;
theta = acos(pos[2] / r);
if (r - fabs(pos[2]) == 0)
phi = 0;
else
phi = acos(pos[0] / pow(pos[0] * pos[0] + pos[1] * pos[1], 0.5));
if (pos[1] < 0)
phi = 2 * PI - phi;
double complex sph_val = Ylm(l, m, theta, phi);
return radial_val * sph_val;
}
double complex proj_value_helper(double r, double rmax, int size, double *temp,
double *x, double *f, double **s, int l,
int m) {
double radial_val = proj_interpolate(r, rmax, size, x, f, s);
if (r == 0)
return Ylm(l, m, 0, 0) * radial_val;
double theta = 0, phi = 0;
theta = acos(temp[2] / r);
if (r - fabs(temp[2]) == 0)
phi = 0;
else
phi = acos(temp[0] / pow(temp[0] * temp[0] + temp[1] * temp[1], 0.5));
if (temp[1] < 0)
phi = 2 * PI - phi;
double complex sph_val = Ylm(l, m, theta, phi);
return radial_val * sph_val;
}
double complex proj_value(funcset_t funcs, double *x, int m, double rmax,
int size, double *ion_pos, double *pos,
double *lattice) {
double temp[3] = {0, 0, 0};
double r = 0;
min_cart_path(pos, ion_pos, lattice, temp, &r);
return proj_value_helper(r, rmax, size, temp, x, funcs.proj,
funcs.proj_spline, funcs.l, m);
}
double complex smooth_wave_value(funcset_t funcs, double *x, int m, double rmax,
int size, double *ion_pos, double *pos,
double *lattice) {
double temp[3] = {0, 0, 0};
double r = 0;
min_cart_path(pos, ion_pos, lattice, temp, &r);
return proj_value_helper(r, rmax, size, temp, x, funcs.smooth_diffwave,
funcs.smooth_diffwave_spline, funcs.l, m);
}
void setup_site(real_proj_site_t *sites, ppot_t *pps, int num_sites,
int *site_nums, int *labels, double *coords, double *lattice,
int *fftg, int pr0_pw1) {
double vol = determinant(lattice);
for (int s = 0; s < num_sites; s++) {
int i = site_nums[s];
sites[s].index = i;
sites[s].elem = labels[i];
sites[s].gridsize = pps[labels[i]].proj_gridsize;
sites[s].num_projs = pps[labels[i]].num_projs;
if (pr0_pw1)
sites[s].rmax = pps[labels[i]].wave_rmax;
else
sites[s].rmax = pps[labels[i]].rmax;
sites[s].total_projs = pps[labels[i]].total_projs;
sites[s].num_indices = 0;
sites[s].coord = malloc(3 * sizeof(double));
CHECK_ALLOCATION(sites[s].coord);
sites[s].coord[0] = coords[3 * i + 0];
sites[s].coord[1] = coords[3 * i + 1];
sites[s].coord[2] = coords[3 * i + 2];
sites[s].indices = calloc(pps[labels[i]].num_cart_gridpts, sizeof(int));
CHECK_ALLOCATION(sites[s].indices);
    sites[s].projs =
        (real_proj_t *)malloc(sites[s].total_projs * sizeof(real_proj_t));
    CHECK_ALLOCATION(sites[s].projs);
    int p = 0;
sites[s].paths =
malloc(3 * pps[labels[i]].num_cart_gridpts * sizeof(double));
CHECK_ALLOCATION(sites[s].paths);
for (int j = 0; j < sites[s].num_projs; j++) {
for (int m = -pps[labels[i]].funcs[j].l; m <= pps[labels[i]].funcs[j].l;
m++) {
sites[s].projs[p].l = pps[labels[i]].funcs[j].l;
sites[s].projs[p].m = m;
sites[s].projs[p].func_num = j;
sites[s].projs[p].values =
malloc(pps[labels[i]].num_cart_gridpts * sizeof(double complex));
CHECK_ALLOCATION(sites[s].projs[p].values);
p++;
}
}
}
//#pragma omp parallel for
for (int s = 0; s < num_sites; s++) {
int p = site_nums[s];
double res[3] = {0, 0, 0};
double frac[3] = {0, 0, 0};
double testcoord[3] = {0, 0, 0};
vcross(res, lattice + 3, lattice + 6);
int grid1 = (int)(mag(res) * sites[s].rmax / vol * fftg[0]) + 1;
vcross(res, lattice + 0, lattice + 6);
int grid2 = (int)(mag(res) * sites[s].rmax / vol * fftg[1]) + 1;
vcross(res, lattice + 0, lattice + 3);
int grid3 = (int)(mag(res) * sites[s].rmax / vol * fftg[2]) + 1;
int center1 = (int)round(coords[3 * p + 0] * fftg[0]);
int center2 = (int)round(coords[3 * p + 1] * fftg[1]);
int center3 = (int)round(coords[3 * p + 2] * fftg[2]);
int ii = 0, jj = 0, kk = 0;
    // Use labels[p] consistently: the original divided by
    // pps[labels[s]].proj_gridsize, indexing labels by the site slot s
    // rather than by the ion index p used everywhere else in this loop.
    double R0 = (pps[labels[p]].proj_gridsize - 1) * sites[s].rmax /
                pps[labels[p]].proj_gridsize;
for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
testcoord[0] = (double)i / fftg[0] - coords[3 * p + 0];
testcoord[1] = (double)j / fftg[1] - coords[3 * p + 1];
testcoord[2] = (double)k / fftg[2] - coords[3 * p + 2];
frac_to_cartesian(testcoord, lattice);
if (mag(testcoord) < R0) {
ii = (i % fftg[0] + fftg[0]) % fftg[0];
jj = (j % fftg[1] + fftg[1]) % fftg[1];
kk = (k % fftg[2] + fftg[2]) % fftg[2];
frac[0] = (double)ii / fftg[0];
frac[1] = (double)jj / fftg[1];
frac[2] = (double)kk / fftg[2];
sites[s].indices[sites[s].num_indices] =
ii * fftg[1] * fftg[2] + jj * fftg[2] + kk;
sites[s].paths[3 * sites[s].num_indices + 0] = testcoord[0];
sites[s].paths[3 * sites[s].num_indices + 1] = testcoord[1];
sites[s].paths[3 * sites[s].num_indices + 2] = testcoord[2];
for (int n = 0; n < sites[s].total_projs; n++) {
if (pr0_pw1)
sites[s].projs[n].values[sites[s].num_indices] =
smooth_wave_value(
pps[labels[p]].funcs[sites[s].projs[n].func_num],
pps[labels[p]].smooth_grid, sites[s].projs[n].m,
sites[s].rmax, pps[labels[p]].proj_gridsize,
coords + 3 * p, frac, lattice);
else
sites[s].projs[n].values[sites[s].num_indices] =
proj_value(pps[labels[p]].funcs[sites[s].projs[n].func_num],
pps[labels[p]].proj_grid, sites[s].projs[n].m,
sites[s].rmax, pps[labels[p]].proj_gridsize,
coords + 3 * p, frac, lattice);
}
sites[s].num_indices++;
// if (sites[s].num_indices >= pps[labels[p]].num_cart_gridpts)
// printf("SETUP_SITE ERROR %d %d %d\n", sites[s].num_indices, p,
// pr0_pw1);
}
}
}
}
}
}
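// Shape of setup_site, for orientation: the first pass over sites
// allocates per-site storage and one real_proj_t per (l, m) channel; the
// second pass scans a bounding box of FFT grid points around each ion
// (grid1/2/3 conservatively cover a sphere of radius rmax in fractional
// coordinates), records wrapped grid indices and minimum-image
// displacements, and tabulates either smooth partial-wave differences
// (pr0_pw1 != 0) or projector values (pr0_pw1 == 0) at each point inside
// the cutoff radius.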
// adapted from VASP source code
double **spline_coeff(double *x, double *y, int N) {
double **coeff = (double **)malloc(3 * sizeof(double *));
CHECK_ALLOCATION(coeff);
coeff[0] = (double *)malloc(N * sizeof(double));
coeff[1] = (double *)malloc(N * sizeof(double));
coeff[2] = (double *)malloc(N * sizeof(double));
CHECK_ALLOCATION(coeff[0]);
CHECK_ALLOCATION(coeff[1]);
CHECK_ALLOCATION(coeff[2]);
  // Left boundary condition. Following the Numerical Recipes convention,
  // a prescribed first derivative > 0.99e30 selects a natural spline
  // (zero second derivative). Because d1p1 is set to the secant slope
  // here, the else-branch source term is identically zero, effectively
  // clamping the left boundary to that slope.
  double d1p1 = (y[1] - y[0]) / (x[1] - x[0]);
  if (d1p1 > 0.99E30) {
    coeff[1][0] = 0;
    coeff[0][0] = 0;
  } else {
    coeff[1][0] = -0.5;
    coeff[0][0] = (3 / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - d1p1);
  }
double s = 0, r = 0;
for (int i = 1; i < N - 1; i++) {
s = (x[i] - x[i - 1]) / (x[i + 1] - x[i - 1]);
r = s * coeff[1][i - 1] + 2;
coeff[1][i] = (s - 1) / r;
coeff[0][i] = (6 *
((y[i + 1] - y[i]) / (x[i + 1] - x[i]) -
(y[i] - y[i - 1]) / (x[i] - x[i - 1])) /
(x[i + 1] - x[i - 1]) -
s * coeff[0][i - 1]) /
r;
}
coeff[0][N - 1] = 0;
coeff[1][N - 1] = 0;
coeff[2][N - 1] = 0;
for (int i = N - 2; i >= 0; i--) {
coeff[1][i] = coeff[1][i] * coeff[1][i + 1] + coeff[0][i];
}
for (int i = 0; i < N - 1; i++) {
s = x[i + 1] - x[i];
r = (coeff[1][i + 1] - coeff[1][i]) / 6;
coeff[2][i] = r / s;
coeff[1][i] = coeff[1][i] / 2;
coeff[0][i] = (y[i + 1] - y[i]) / s - (coeff[1][i] + r) * s;
}
return coeff;
}
double spline_integral(double *x, double *a, double **s, int size) {
double *b = s[0];
double *c = s[1];
double *d = s[2];
double dx = 0;
double integral = 0;
for (int i = 0; i < size - 1; i++) {
dx = x[i + 1] - x[i];
integral += dx * (a[i] + dx * (b[i] / 2 + dx * (c[i] / 3 + d[i] * dx / 4)));
}
return integral;
}
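// Usage sketch for the two routines above (illustrative only):
//   double **s = spline_coeff(x, y, N);                 /* b, c, d arrays */
//   double area = spline_integral(x, y, s, N);
//   double val = proj_interpolate(r, rmax, N, x, y, s); /* uniform grid */
//   free(s[0]); free(s[1]); free(s[2]); free(s);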
void frac_from_index(int index, double *coord, int *fftg) {
int t1 = index / (fftg[1] * fftg[2]);
int t2 = index % (fftg[1] * fftg[2]);
int t3 = t2 % fftg[2];
t2 /= fftg[2];
coord[0] = ((double)t1) / fftg[0];
coord[1] = ((double)t2) / fftg[1];
coord[2] = ((double)t3) / fftg[2];
}
void direction(double *cart, double *dir) {
double theta = 0, phi = 0;
double r = mag(cart);
theta = acos(cart[2] / r);
if (r - fabs(cart[2]) == 0)
phi = 0;
else
phi = acos(cart[0] / pow(cart[0] * cart[0] + cart[1] * cart[1], 0.5));
if (cart[1] < 0)
phi = 2 * PI - phi;
dir[0] = theta;
dir[1] = phi;
}
double sph_bessel(double k, double r, int l) {
double x = k * r;
if (l == 0)
return sin(x) / x;
else if (l == 1)
return sin(x) / (x * x) - cos(x) / x;
else if (l == 2)
return (3 / (x * x) - 1) * sin(x) / x - 3 * cos(x) / (x * x);
else if (l == 3)
return (15 / (x * x * x) - 6 / x) * sin(x) / x -
(15 / (x * x) - 1) * cos(x) / x;
else {
printf("ERROR: sph_bessel l too high");
return 0;
}
}
double sbf(double x, int l) {
if (x < 10e-6) {
if (l == 0)
return 1;
else
return 0;
}
double jlm1 = sin(x) / x;
double jl = sin(x) / (x * x) - cos(x) / x;
double jlp1 = 0;
if (l == 0)
return jlm1;
if (l == 1)
return jl;
for (int ll = 1; ll < l; ll++) {
jlp1 = (2 * ll + 1) / x * jl - jlm1;
jlm1 = jl;
jl = jlp1;
}
return jlp1;
}
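// sbf uses the upward recurrence for spherical Bessel functions,
//   j_{l+1}(x) = ((2l + 1) / x) * j_l(x) - j_{l-1}(x),
// seeded with j_0 = sin(x)/x and j_1 = sin(x)/x^2 - cos(x)/x. Upward
// recurrence is accurate for x of order l or larger but loses precision
// for x << l; the small-x branch above returns the x -> 0 limit directly.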
pswf_t *expand_symm_wf(pswf_t *rwf, int num_kpts, int *maps, double *ops,
double *drs, double *kws, int *trs) {
double *lattice = rwf->lattice;
double *reclattice = rwf->reclattice;
  pswf_t *wf = (pswf_t *)malloc(sizeof(pswf_t));
  CHECK_ALLOCATION(wf);
wf->num_elems = rwf->num_elems;
wf->num_sites = rwf->num_sites;
wf->pps = NULL;
wf->G_bounds = (int *)malloc(6 * sizeof(int));
for (int i = 0; i < 6; i++) {
wf->G_bounds[i] = 0; // rwf->G_bounds[i];
}
wf->kpts = (kpoint_t **)malloc(num_kpts * rwf->nspin * sizeof(kpoint_t *));
wf->nspin = rwf->nspin;
wf->nband = rwf->nband;
wf->nwk = num_kpts;
wf->lattice = (double *)malloc(9 * sizeof(double));
wf->reclattice = (double *)malloc(9 * sizeof(double));
for (int i = 0; i < 9; i++) {
wf->lattice[i] = lattice[i];
wf->reclattice[i] = reclattice[i];
}
wf->fftg = NULL;
wf->is_ncl = rwf->is_ncl;
wf->num_aug_overlap_sites = 0;
wf->dcoords = NULL;
wf->overlaps = NULL;
wf->num_projs = NULL;
wf->wp_num = 0;
//#pragma omp parallel for
for (int knum = 0; knum < num_kpts * wf->nspin; knum++) {
wf->kpts[knum] = (kpoint_t *)malloc(sizeof(kpoint_t));
double pw[3] = {0, 0, 0};
int rnum = maps[knum % num_kpts];
int tr = trs[knum % num_kpts];
if (knum >= num_kpts && rwf->nspin == 2) {
rnum += rwf->nwk;
}
kpoint_t *kpt = wf->kpts[knum];
kpoint_t *rkpt = rwf->kpts[rnum];
kpt->up = rkpt->up;
kpt->num_waves = rkpt->num_waves;
kpt->k = (double *)malloc(3 * sizeof(double));
rotation_transform(kpt->k, ops + OPSIZE * (knum % num_kpts), rkpt->k);
if (tr == 1) {
kpt->k[0] *= -1;
kpt->k[1] *= -1;
kpt->k[2] *= -1;
}
double kdiff[3] = {round(kpt->k[0]), round(kpt->k[1]), round(kpt->k[2])};
kpt->k[0] -= kdiff[0];
kpt->k[1] -= kdiff[1];
kpt->k[2] -= kdiff[2];
if (fabs(kpt->k[0] + 0.5) < 0.0001) {
kdiff[0] -= 1;
kpt->k[0] += 1;
}
if (fabs(kpt->k[1] + 0.5) < 0.0001) {
kdiff[1] -= 1;
kpt->k[1] += 1;
}
if (fabs(kpt->k[2] + 0.5) < 0.0001) {
kdiff[2] -= 1;
kpt->k[2] += 1;
}
// printf("OLD KPT %lf %lf %lf\n", rkpt->k[0], rkpt->k[1], rkpt->k[2]);
// printf("NEW KPT %lf %lf %lf\n", kpt->k[0], kpt->k[1], kpt->k[2]);
// kpt->Gs = (int*) malloc(3 * kpt->num_waves * sizeof(int));
kpt->weight = kws[knum % num_kpts];
kpt->num_bands = rkpt->num_bands;
kpt->bands = (band_t **)malloc(kpt->num_bands * sizeof(band_t *));
kpt->expansion = NULL;
int *igall = malloc(3 * kpt->num_waves * sizeof(int));
if (igall == NULL) {
ALLOCATION_FAILED();
}
int nb1max = rwf->G_bounds[1] - rwf->G_bounds[0] + 2;
int nb2max = rwf->G_bounds[3] - rwf->G_bounds[2] + 2;
int nb3max = rwf->G_bounds[5] - rwf->G_bounds[4] + 2;
double encut = rwf->encut;
double *b1 = reclattice;
double *b2 = reclattice + 3;
double *b3 = reclattice + 6;
int ncnt = -1;
for (int ig3 = 0; ig3 <= 2 * nb3max; ig3++) {
int ig3p = ig3;
if (ig3 > nb3max)
ig3p = ig3 - 2 * nb3max - 1;
for (int ig2 = 0; ig2 <= 2 * nb2max; ig2++) {
int ig2p = ig2;
if (ig2 > nb2max)
ig2p = ig2 - 2 * nb2max - 1;
for (int ig1 = 0; ig1 <= 2 * nb1max; ig1++) {
int ig1p = ig1;
if (ig1 > nb1max)
ig1p = ig1 - 2 * nb1max - 1;
double sumkg[3];
for (int j = 0; j < 3; j++) {
sumkg[j] = (kpt->k[0] + ig1p) * b1[j] + (kpt->k[1] + ig2p) * b2[j] +
(kpt->k[2] + ig3p) * b3[j];
}
double gtot = mag(sumkg);
double etot = pow(gtot, 2.0) / CCONST;
// printf("%lf %lf\n", etot, gtot);
if (etot <= encut) {
ncnt++;
igall[ncnt * 3 + 0] = ig1p;
igall[ncnt * 3 + 1] = ig2p;
igall[ncnt * 3 + 2] = ig3p;
if (ig1p < wf->G_bounds[0])
wf->G_bounds[0] = ig1p;
else if (ig1p > wf->G_bounds[1])
wf->G_bounds[1] = ig1p;
if (ig2p < wf->G_bounds[2])
wf->G_bounds[2] = ig2p;
else if (ig2p > wf->G_bounds[3])
wf->G_bounds[3] = ig2p;
if (ig3p < wf->G_bounds[4])
wf->G_bounds[4] = ig3p;
else if (ig3p > wf->G_bounds[5])
wf->G_bounds[5] = ig3p;
}
}
}
}
ncnt++;
if (ncnt * 2 == rkpt->num_waves) {
printf("This is an NCL wavefunction!\n");
wf->is_ncl = 1;
for (int iplane = 0; iplane < rkpt->num_waves / 2; iplane++) {
igall[3 * (rkpt->num_waves / 2 + iplane) + 0] = igall[3 * iplane + 0];
igall[3 * (rkpt->num_waves / 2 + iplane) + 1] = igall[3 * iplane + 1];
igall[3 * (rkpt->num_waves / 2 + iplane) + 2] = igall[3 * iplane + 2];
}
} else if (ncnt != rkpt->num_waves) {
printf("ERROR %d %d %lf %lf %lf %lf\n", ncnt, kpt->num_waves, kpt->k[0],
kpt->k[1], kpt->k[2], CCONST);
}
kpt->Gs = igall;
int ngx = wf->G_bounds[1] - wf->G_bounds[0] + 1;
int gxmin = wf->G_bounds[0];
int ngy = wf->G_bounds[3] - wf->G_bounds[2] + 1;
int gymin = wf->G_bounds[2];
int ngz = wf->G_bounds[5] - wf->G_bounds[4] + 1;
int gzmin = wf->G_bounds[4];
int *kptinds = (int *)malloc(ngx * ngy * ngz * sizeof(int));
for (int w = 0; w < ngx * ngy * ngz; w++)
kptinds[w] = -1;
int gx, gy, gz;
int *gmaps = (int *)malloc(kpt->num_waves * sizeof(int));
float complex *factors =
(float complex *)malloc(kpt->num_waves * sizeof(float complex));
for (int w = 0; w < kpt->num_waves; w++)
gmaps[w] = -1;
for (int w = 0; w < rkpt->num_waves; w++) {
gx = kpt->Gs[3 * w + 0];
gy = kpt->Gs[3 * w + 1];
gz = kpt->Gs[3 * w + 2];
kptinds[(gx - gxmin) * ngy * ngz + (gy - gymin) * ngz + (gz - gzmin)] = w;
}
double *dr = drs + 3 * (knum % num_kpts);
double *op = ops + OPSIZE * (knum % num_kpts);
for (int g = 0; g < kpt->num_waves; g++) {
// pw[0] = rkpt->k[0] + rkpt->Gs[3*g+0];
// pw[1] = rkpt->k[1] + rkpt->Gs[3*g+1];
// pw[2] = rkpt->k[2] + rkpt->Gs[3*g+2];
pw[0] = rkpt->Gs[3 * g + 0];
pw[1] = rkpt->Gs[3 * g + 1];
pw[2] = rkpt->Gs[3 * g + 2];
rotation_transform(pw, ops + OPSIZE * (knum % num_kpts), pw);
if (tr == 1) {
pw[0] *= -1;
pw[1] *= -1;
pw[2] *= -1;
}
// pw[0] -= kpt->k[0];
// pw[1] -= kpt->k[1];
// pw[2] -= kpt->k[2];
pw[0] += kdiff[0];
pw[1] += kdiff[1];
pw[2] += kdiff[2];
      gx = (int)round(pw[0]);
      gy = (int)round(pw[1]);
      gz = (int)round(pw[2]);
      int ind = kptinds[(gx - gxmin) * ngy * ngz + (gy - gymin) * ngz +
                        (gz - gzmin)];
      // Validate the mapping before using it as an index; the original
      // only checked after the stores, when a -1 entry would already
      // have been used to index gmaps and factors.
      if (ind < 0) {
        printf("ERROR, BAD PLANE WAVE MAPPING %d %d %d %d %d %d %lf %lf %lf\n "
               "%lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
               rkpt->Gs[3 * g + 0], rkpt->Gs[3 * g + 1], rkpt->Gs[3 * g + 2],
               gx, gy, gz, pw[0], pw[1], pw[2], op[0], op[1], op[2], op[3],
               op[4], op[5], op[6], op[7], op[8]);
        continue;
      }
      gmaps[ind] = g;
      if (tr == 0)
        factors[ind] = cexpf(-I * 2 * PI * (dot(kpt->k, dr) + dot(pw, dr)));
      else
        factors[ind] = cexpf(I * 2 * PI * (dot(kpt->k, dr) + dot(pw, dr)));
// printf("OLD G %d %d %d\n", okpt->Gs[3*g+0], okpt->Gs[3*g+1],
// okpt->Gs[3*g+2]); printf("NEW G %d %d %d\n", kpt->Gs[3*g+0],
// kpt->Gs[3*g+1], kpt->Gs[3*g+2]);
}
for (int b = 0; b < kpt->num_bands; b++) {
kpt->bands[b] = (band_t *)malloc(sizeof(band_t));
kpt->bands[b]->n = rkpt->bands[b]->n;
kpt->bands[b]->num_waves = rkpt->bands[b]->num_waves;
kpt->bands[b]->occ = rkpt->bands[b]->occ;
kpt->bands[b]->energy = rkpt->bands[b]->energy;
kpt->bands[b]->Cs =
(float complex *)malloc(kpt->num_waves * sizeof(float complex));
kpt->bands[b]->CRs = NULL;
kpt->bands[b]->CAs = NULL;
kpt->bands[b]->projections = NULL;
kpt->bands[b]->up_projections = NULL;
kpt->bands[b]->down_projections = NULL;
kpt->bands[b]->wave_projections = NULL;
      for (int w = 0; w < kpt->num_waves; w++) {
        if (gmaps[w] < 0) {
          // Skip unmapped entries rather than indexing Cs with -1.
          printf("ERROR, INCOMPLETE PLANE WAVE MAPPING\n");
          continue;
        }
        kpt->bands[b]->Cs[w] = factors[w] * rkpt->bands[b]->Cs[gmaps[w]];
        if (tr == 1) {
          kpt->bands[b]->Cs[w] = conjf(kpt->bands[b]->Cs[w]);
        }
      }
}
// printf("energies %lf %lf %e\n", kpt->bands[b]->energy,
// okpt->bands[b]->energy, total);
}
free(gmaps);
free(kptinds);
free(factors);
}
wf->encut = rwf->encut;
return wf;
}
void CHECK_ALLOCATION(void *ptr) {
if (ptr == NULL) {
ALLOCATION_FAILED();
}
}
void ALLOCATION_FAILED(void) {
printf("ALLOCATION FAILED\n");
exit(-1);
}
void CHECK_STATUS(int status) {
if (status != 0) {
printf("ROUTINE FAILED WITH STATUS %d:\n", status);
char *message = DftiErrorMessage(status);
printf("%s\n", message);
exit(-1);
}
}
double *get_smooth_wave(ppot_t *lst, int num) {
// return lst[num].funcs[0].smooth_diffwave;
return lst[num].smooth_grid;
}
|
gimple.h | /* Modula-3: modified */
/* Gimple IR definitions.
Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_GIMPLE_H
#define GCC_GIMPLE_H
#include "pointer-set.h"
#include "vec.h"
#include "ggc.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "tree-ssa-operands.h"
#ifdef __cplusplus
extern "C" {
#endif
DEF_VEC_P(gimple);
DEF_VEC_ALLOC_P(gimple,heap);
DEF_VEC_ALLOC_P(gimple,gc);
typedef gimple *gimple_p;
DEF_VEC_P(gimple_p);
DEF_VEC_ALLOC_P(gimple_p,heap);
DEF_VEC_P(gimple_seq);
DEF_VEC_ALLOC_P(gimple_seq,gc);
DEF_VEC_ALLOC_P(gimple_seq,heap);
/* For each block, the PHI nodes that need to be rewritten are stored into
these vectors. */
typedef VEC(gimple, heap) *gimple_vec;
DEF_VEC_P (gimple_vec);
DEF_VEC_ALLOC_P (gimple_vec, heap);
enum gimple_code {
#define DEFGSCODE(SYM, STRING, STRUCT) SYM,
#include "gimple.def"
#undef DEFGSCODE
LAST_AND_UNUSED_GIMPLE_CODE
};
extern const char *const gimple_code_name[];
extern const unsigned char gimple_rhs_class_table[];
/* Error out if a gimple tuple is addressed incorrectly. */
#if defined ENABLE_GIMPLE_CHECKING
extern void gimple_check_failed (const_gimple, const char *, int, \
const char *, enum gimple_code, \
enum tree_code) ATTRIBUTE_NORETURN;
#define GIMPLE_CHECK(GS, CODE) \
do { \
const_gimple __gs = (GS); \
if (gimple_code (__gs) != (CODE)) \
gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \
(CODE), ERROR_MARK); \
} while (0)
#else /* not ENABLE_GIMPLE_CHECKING */
#define GIMPLE_CHECK(GS, CODE) (void)0
#endif
/* Class of GIMPLE expressions suitable for the RHS of assignments. See
get_gimple_rhs_class. */
enum gimple_rhs_class
{
GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */
GIMPLE_BINARY_RHS, /* The expression is a binary operation. */
GIMPLE_UNARY_RHS, /* The expression is a unary operation. */
  GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA
                       name, a _DECL, a _REF, etc.). */
};
/* Specific flags for individual GIMPLE statements. These flags are
always stored in gimple_statement_base.subcode and they may only be
defined for statement codes that do not use sub-codes.
Values for the masks can overlap as long as the overlapping values
are never used in the same statement class.
The maximum mask value that can be defined is 1 << 15 (i.e., each
statement code can hold up to 16 bitflags).
Keep this list sorted. */
enum gf_mask {
GF_ASM_INPUT = 1 << 0,
GF_ASM_VOLATILE = 1 << 1,
GF_CALL_CANNOT_INLINE = 1 << 0,
GF_CALL_FROM_THUNK = 1 << 1,
GF_CALL_RETURN_SLOT_OPT = 1 << 2,
GF_CALL_TAILCALL = 1 << 3,
GF_CALL_VA_ARG_PACK = 1 << 4,
GF_CALL_NOTHROW = 1 << 5,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
  /* True on a GIMPLE_OMP_RETURN statement if the return does not require
a thread synchronization via some sort of barrier. The exact barrier
that would otherwise be emitted is dependent on the OMP statement with
which this return is associated. */
GF_OMP_RETURN_NOWAIT = 1 << 0,
GF_OMP_SECTION_LAST = 1 << 0,
GF_PREDICT_TAKEN = 1 << 15
};
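/* For instance, GF_ASM_INPUT and GF_CALL_CANNOT_INLINE both occupy bit 0;
   this is safe because the former is only ever tested on GIMPLE_ASM
   statements and the latter only on GIMPLE_CALL statements. */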
/* Currently, there's only one type of gimple debug stmt. Others are
envisioned, for example, to enable the generation of is_stmt notes
in line number information, to mark sequence points, etc. This
subcode is to be used to tell them apart. */
enum gimple_debug_subcode {
GIMPLE_DEBUG_BIND = 0
};
/* Masks for selecting a pass local flag (PLF) to work on. These
masks are used by gimple_set_plf and gimple_plf. */
enum plf_mask {
GF_PLF_1 = 1 << 0,
GF_PLF_2 = 1 << 1
};
/* A node in a gimple_seq_d. */
struct GTY((chain_next ("%h.next"), chain_prev ("%h.prev"))) gimple_seq_node_d {
gimple stmt;
struct gimple_seq_node_d *prev;
struct gimple_seq_node_d *next;
};
/* A double-linked sequence of gimple statements. */
struct GTY ((chain_next ("%h.next_free"))) gimple_seq_d {
/* First and last statements in the sequence. */
gimple_seq_node first;
gimple_seq_node last;
/* Sequences are created/destroyed frequently. To minimize
allocation activity, deallocated sequences are kept in a pool of
available sequences. This is the pointer to the next free
sequence in the pool. */
gimple_seq next_free;
};
/* Return the first node in GIMPLE sequence S. */
static inline gimple_seq_node
gimple_seq_first (const_gimple_seq s)
{
return s ? s->first : NULL;
}
/* Return the first statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_first_stmt (const_gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
return (n) ? n->stmt : NULL;
}
/* Return the last node in GIMPLE sequence S. */
static inline gimple_seq_node
gimple_seq_last (const_gimple_seq s)
{
return s ? s->last : NULL;
}
/* Return the last statement in GIMPLE sequence S. */
static inline gimple
gimple_seq_last_stmt (const_gimple_seq s)
{
gimple_seq_node n = gimple_seq_last (s);
return (n) ? n->stmt : NULL;
}
/* Set the last node in GIMPLE sequence S to LAST. */
static inline void
gimple_seq_set_last (gimple_seq s, gimple_seq_node last)
{
s->last = last;
}
/* Set the first node in GIMPLE sequence S to FIRST. */
static inline void
gimple_seq_set_first (gimple_seq s, gimple_seq_node first)
{
s->first = first;
}
/* Return true if GIMPLE sequence S is empty. */
static inline bool
gimple_seq_empty_p (const_gimple_seq s)
{
return s == NULL || s->first == NULL;
}
void gimple_seq_add_stmt (gimple_seq *, gimple);
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void gimplify_seq_add_stmt (gimple_seq *, gimple);
/* Allocate a new sequence and initialize its first element with STMT. */
static inline gimple_seq
gimple_seq_alloc_with_stmt (gimple stmt)
{
gimple_seq seq = NULL;
gimple_seq_add_stmt (&seq, stmt);
return seq;
}
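/* Illustrative use of the sequence entry points above (a sketch, not from
   the original source):

     gimple_seq seq = gimple_seq_alloc_with_stmt (stmt1);
     gimple_seq_add_stmt (&seq, stmt2);
*/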
/* Returns the sequence of statements in BB. */
static inline gimple_seq
bb_seq (const_basic_block bb)
{
return (!(bb->flags & BB_RTL) && bb->il.gimple) ? bb->il.gimple->seq : NULL;
}
/* Sets the sequence of statements in BB to SEQ. */
static inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
gcc_assert (!(bb->flags & BB_RTL));
bb->il.gimple->seq = seq;
}
/* Iterator object for GIMPLE statement sequences. */
typedef struct
{
/* Sequence node holding the current statement. */
gimple_seq_node ptr;
/* Sequence and basic block holding the statement. These fields
are necessary to handle edge cases such as when statement is
added to an empty basic block or when the last statement of a
block/sequence is removed. */
gimple_seq seq;
basic_block bb;
} gimple_stmt_iterator;
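/* Typical traversal with this iterator (a sketch; the gsi_* accessors are
   declared further down in this header in GCC):

     gimple_stmt_iterator gsi;
     for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
       process_stmt (gsi_stmt (gsi));   -- process_stmt is hypothetical
*/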
/* Data structure definitions for GIMPLE tuples. NOTE: word markers
are for 64 bit hosts. */
struct GTY(()) gimple_statement_base {
/* [ WORD 1 ]
Main identifying code for a tuple. */
ENUM_BITFIELD(gimple_code, code, 8);
/* Nonzero if a warning should not be emitted on this tuple. */
unsigned int no_warning : 1;
/* Nonzero if this tuple has been visited. Passes are responsible
for clearing this bit before using it. */
unsigned int visited : 1;
/* Nonzero if this tuple represents a non-temporal move. */
unsigned int nontemporal_move : 1;
/* Pass local flags. These flags are free for any pass to use as
they see fit. Passes should not assume that these flags contain
any useful value when the pass starts. Any initial state that
the pass requires should be set on entry to the pass. See
gimple_set_plf and gimple_plf for usage. */
unsigned int plf : 2;
/* Nonzero if this statement has been modified and needs to have its
operands rescanned. */
unsigned modified : 1;
/* Nonzero if this statement contains volatile operands. */
unsigned has_volatile_ops : 1;
/* Padding to get subcode to 16 bit alignment. */
unsigned pad : 1;
/* The SUBCODE field can be used for tuple-specific flags for tuples
that do not require subcodes. Note that SUBCODE should be at
least as wide as tree codes, as several tuples store tree codes
in there. */
unsigned int subcode : 16;
/* UID of this statement. This is used by passes that want to
assign IDs to statements. It must be assigned and used by each
pass. By default it should be assumed to contain garbage. */
unsigned uid;
/* [ WORD 2 ]
Locus information for debug info. */
location_t location;
/* Number of operands in this tuple. */
unsigned num_ops;
/* [ WORD 3 ]
Basic block holding this statement. */
struct basic_block_def *bb;
/* [ WORD 4 ]
Lexical block holding this statement. */
tree block;
};
/* Base structure for tuples with operands. */
struct GTY(()) gimple_statement_with_ops_base
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ]
SSA operand vectors. NOTE: It should be possible to
amalgamate these vectors with the operand vector OP. However,
the SSA operand vectors are organized differently and contain
more information (like immediate use chaining). */
struct def_optype_d GTY((skip (""))) *def_ops;
struct use_optype_d GTY((skip (""))) *use_ops;
};
/* Statements that take register operands. */
struct GTY(()) gimple_statement_with_ops
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.opbase.gsbase.num_ops"))) op[1];
};
/* Base for statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops_base
{
/* [ WORD 1-6 ] */
struct gimple_statement_with_ops_base opbase;
/* [ WORD 7-8 ]
Virtual operands for this statement. The GC will pick them
up via the ssa_names array. */
tree GTY((skip (""))) vdef;
tree GTY((skip (""))) vuse;
};
/* Statements that take both memory and register operands. */
struct GTY(()) gimple_statement_with_memory_ops
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* OpenMP statements (#pragma omp). */
struct GTY(()) gimple_statement_omp {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
gimple_seq body;
};
/* GIMPLE_BIND */
struct GTY(()) gimple_statement_bind {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Variables declared in this scope. */
tree vars;
/* [ WORD 6 ]
This is different than the BLOCK field in gimple_statement_base,
which is analogous to TREE_BLOCK (i.e., the lexical block holding
this statement). This field is the equivalent of BIND_EXPR_BLOCK
in tree land (i.e., the lexical scope defined by this bind). See
gimple-low.c. */
tree block;
/* [ WORD 7 ] */
gimple_seq body;
};
/* GIMPLE_CATCH */
struct GTY(()) gimple_statement_catch {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree types;
/* [ WORD 6 ] */
gimple_seq handler;
};
/* GIMPLE_EH_FILTER */
struct GTY(()) gimple_statement_eh_filter {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Filter types. */
tree types;
/* [ WORD 6 ]
Failure actions. */
gimple_seq failure;
};
/* GIMPLE_EH_MUST_NOT_THROW */
struct GTY(()) gimple_statement_eh_mnt {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] Abort function decl. */
tree fndecl;
};
/* GIMPLE_PHI */
struct GTY(()) gimple_statement_phi {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
unsigned capacity;
unsigned nargs;
/* [ WORD 6 ] */
tree result;
/* [ WORD 7 ] */
struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};
/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */
struct GTY(()) gimple_statement_eh_ctrl
{
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Exception region number. */
int region;
};
/* GIMPLE_TRY */
struct GTY(()) gimple_statement_try {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ]
Expression to evaluate. */
gimple_seq eval;
/* [ WORD 6 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* Kind of GIMPLE_TRY statements. */
enum gimple_try_flags
{
/* A try/catch. */
GIMPLE_TRY_CATCH = 1 << 0,
/* A try/finally. */
GIMPLE_TRY_FINALLY = 1 << 1,
GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,
/* Analogous to TRY_CATCH_IS_CLEANUP. */
GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};
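/* Example (sketch): a try/finally region pairing a body with its cleanup
   is built via the constructor declared below,
     gimple_build_try (body_seq, cleanup_seq, GIMPLE_TRY_FINALLY);
   and a try/catch region uses GIMPLE_TRY_CATCH instead. */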
/* GIMPLE_WITH_CLEANUP_EXPR */
struct GTY(()) gimple_statement_wce {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* Subcode: CLEANUP_EH_ONLY. True if the cleanup should only be
executed if an exception is thrown, not on normal exit of its
scope. This flag is analogous to the CLEANUP_EH_ONLY flag
in TARGET_EXPRs. */
/* [ WORD 5 ]
Cleanup expression. */
gimple_seq cleanup;
};
/* GIMPLE_ASM */
struct GTY(()) gimple_statement_asm
{
/* [ WORD 1-8 ] */
struct gimple_statement_with_memory_ops_base membase;
/* [ WORD 9 ]
__asm__ statement. */
const char *string;
/* [ WORD 10 ]
Number of inputs, outputs, clobbers, labels. */
unsigned char ni;
unsigned char no;
unsigned char nc;
unsigned char nl;
/* [ WORD 11 ]
Operand vector. NOTE! This must always be the last field
of this structure. In particular, this means that this
structure cannot be embedded inside another one. */
tree GTY((length ("%h.membase.opbase.gsbase.num_ops"))) op[1];
};
/* GIMPLE_OMP_CRITICAL */
struct GTY(()) gimple_statement_omp_critical {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Critical section name. */
tree name;
};
struct GTY(()) gimple_omp_for_iter {
/* Condition code. */
enum tree_code cond;
/* Index variable. */
tree index;
/* Initial value. */
tree initial;
/* Final value. */
tree final;
/* Increment. */
tree incr;
};
/* GIMPLE_OMP_FOR */
struct GTY(()) gimple_statement_omp_for {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
Number of elements in iter array. */
size_t collapse;
/* [ WORD 8 ] */
struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;
/* [ WORD 9 ]
Pre-body evaluated before the loop body begins. */
gimple_seq pre_body;
};
/* GIMPLE_OMP_PARALLEL */
struct GTY(()) gimple_statement_omp_parallel {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ]
Clauses. */
tree clauses;
/* [ WORD 7 ]
Child function holding the body of the parallel region. */
tree child_fn;
/* [ WORD 8 ]
Shared data argument. */
tree data_arg;
};
/* GIMPLE_OMP_TASK */
struct GTY(()) gimple_statement_omp_task {
/* [ WORD 1-8 ] */
struct gimple_statement_omp_parallel par;
/* [ WORD 9 ]
Child function holding firstprivate initialization if needed. */
tree copy_fn;
/* [ WORD 10-11 ]
Size and alignment in bytes of the argument data block. */
tree arg_size;
tree arg_align;
};
/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp. */
/* GIMPLE_OMP_SECTIONS */
struct GTY(()) gimple_statement_omp_sections {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
/* [ WORD 7 ]
The control variable used for deciding which of the sections to
execute. */
tree control;
};
/* GIMPLE_OMP_CONTINUE.
Note: This does not inherit from gimple_statement_omp, because we
do not need the body field. */
struct GTY(()) gimple_statement_omp_continue {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree control_def;
/* [ WORD 6 ] */
tree control_use;
};
/* GIMPLE_OMP_SINGLE */
struct GTY(()) gimple_statement_omp_single {
/* [ WORD 1-5 ] */
struct gimple_statement_omp omp;
/* [ WORD 6 ] */
tree clauses;
};
/* GIMPLE_OMP_ATOMIC_LOAD.
Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
contains a sequence, which we don't need here. */
struct GTY(()) gimple_statement_omp_atomic_load {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5-6 ] */
tree rhs, lhs;
};
/* GIMPLE_OMP_ATOMIC_STORE.
See note on GIMPLE_OMP_ATOMIC_LOAD. */
struct GTY(()) gimple_statement_omp_atomic_store {
/* [ WORD 1-4 ] */
struct gimple_statement_base gsbase;
/* [ WORD 5 ] */
tree val;
};
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
LAST_GSS_ENUM
};
#undef DEFGSSTRUCT
/* Define the overall contents of a gimple tuple. It may be any of the
structures declared above for various types of tuples. */
union GTY ((desc ("gimple_statement_structure (&%h)"))) gimple_statement_d {
struct gimple_statement_base GTY ((tag ("GSS_BASE"))) gsbase;
struct gimple_statement_with_ops GTY ((tag ("GSS_WITH_OPS"))) gsops;
struct gimple_statement_with_memory_ops_base GTY ((tag ("GSS_WITH_MEM_OPS_BASE"))) gsmembase;
struct gimple_statement_with_memory_ops GTY ((tag ("GSS_WITH_MEM_OPS"))) gsmem;
struct gimple_statement_omp GTY ((tag ("GSS_OMP"))) omp;
struct gimple_statement_bind GTY ((tag ("GSS_BIND"))) gimple_bind;
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
struct gimple_statement_wce GTY ((tag ("GSS_WCE"))) gimple_wce;
struct gimple_statement_asm GTY ((tag ("GSS_ASM"))) gimple_asm;
struct gimple_statement_omp_critical GTY ((tag ("GSS_OMP_CRITICAL"))) gimple_omp_critical;
struct gimple_statement_omp_for GTY ((tag ("GSS_OMP_FOR"))) gimple_omp_for;
struct gimple_statement_omp_parallel GTY ((tag ("GSS_OMP_PARALLEL"))) gimple_omp_parallel;
struct gimple_statement_omp_task GTY ((tag ("GSS_OMP_TASK"))) gimple_omp_task;
struct gimple_statement_omp_sections GTY ((tag ("GSS_OMP_SECTIONS"))) gimple_omp_sections;
struct gimple_statement_omp_single GTY ((tag ("GSS_OMP_SINGLE"))) gimple_omp_single;
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
};
/* In gimple.c. */
/* Offset in bytes to the location of the operand vector.
Zero if there is no operand vector for this tuple structure. */
extern size_t const gimple_ops_offset_[];
/* Map GIMPLE codes to GSS codes. */
extern enum gimple_statement_structure_enum const gss_for_code_[];
/* This variable holds the currently expanded gimple statement for purposes
   of communicating the profile info to the builtin expanders. */
extern gimple currently_expanding_gimple_stmt;
gimple gimple_build_return (tree);
gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL);
#define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO)
void extract_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_assign_with_ops_stat (enum tree_code, tree, tree,
tree MEM_STAT_DECL);
#define gimple_build_assign_with_ops(c,o1,o2,o3) \
gimple_build_assign_with_ops_stat (c, o1, o2, o3 MEM_STAT_INFO)
gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL);
#define gimple_build_debug_bind(var,val,stmt) \
gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO)
gimple gimple_build_call_vec (tree, VEC(tree, heap) *);
gimple gimple_build_call (tree, unsigned, ...);
gimple gimple_build_call_from_tree (tree);
gimple gimplify_assign (tree, tree, gimple_seq *);
gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree);
gimple gimple_build_label (tree label);
gimple gimple_build_goto (tree dest);
gimple gimple_build_nop (void);
gimple gimple_build_bind (tree, gimple_seq, tree);
gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
VEC(tree,gc) *, VEC(tree,gc) *);
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
gimple gimple_build_eh_dispatch (int);
gimple gimple_build_switch_nlabels (unsigned, tree, tree);
gimple gimple_build_switch (unsigned, tree, tree, ...);
gimple gimple_build_switch_vec (tree, tree, VEC(tree,heap) *);
gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree);
gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree);
gimple gimple_build_omp_for (gimple_seq, tree, size_t, gimple_seq);
gimple gimple_build_omp_critical (gimple_seq, tree);
gimple gimple_build_omp_section (gimple_seq);
gimple gimple_build_omp_continue (tree, tree);
gimple gimple_build_omp_master (gimple_seq);
gimple gimple_build_omp_return (bool);
gimple gimple_build_omp_ordered (gimple_seq);
gimple gimple_build_omp_sections (gimple_seq, tree);
gimple gimple_build_omp_sections_switch (void);
gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
void gimple_set_body (tree, gimple_seq);
gimple_seq gimple_body (tree);
bool gimple_has_body_p (tree);
gimple_seq gimple_seq_alloc (void);
void gimple_seq_free (gimple_seq);
void gimple_seq_add_seq (gimple_seq *, gimple_seq);
gimple_seq gimple_seq_copy (gimple_seq);
int gimple_call_flags (const_gimple);
bool gimple_assign_copy_p (gimple);
bool gimple_assign_ssa_name_copy_p (gimple);
bool gimple_assign_single_p (gimple);
bool gimple_assign_unary_nop_p (gimple);
void gimple_set_bb (gimple, struct basic_block_def *);
void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree);
void gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *, enum tree_code,
tree, tree);
tree gimple_get_lhs (const_gimple);
void gimple_set_lhs (gimple, tree);
void gimple_replace_lhs (gimple, tree);
gimple gimple_copy (gimple);
bool is_gimple_operand (const_tree);
void gimple_set_modified (gimple, bool);
void gimple_cond_get_ops_from_tree (tree, enum tree_code *, tree *, tree *);
gimple gimple_build_cond_from_tree (tree, tree, tree);
void gimple_cond_set_condition_from_tree (gimple, tree);
bool gimple_has_side_effects (const_gimple);
bool gimple_rhs_has_side_effects (const_gimple);
bool gimple_could_trap_p (gimple);
bool gimple_assign_rhs_could_trap_p (gimple);
void gimple_regimplify_operands (gimple, gimple_stmt_iterator *);
bool empty_body_p (gimple_seq);
unsigned get_gimple_rhs_num_ops (enum tree_code);
#define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO)
gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL);
const char *gimple_decl_printable_name (tree, int);
tree gimple_fold_obj_type_ref (tree, tree);
/* Returns true iff T is a valid GIMPLE statement. */
extern bool is_gimple_stmt (tree);
/* Returns true iff TYPE is a valid type for a scalar register variable. */
extern bool is_gimple_reg_type (tree);
/* Returns true iff T is a scalar register variable. */
extern bool is_gimple_reg (tree);
/* Returns true iff T is any sort of variable. */
extern bool is_gimple_variable (tree);
/* Returns true iff T is any sort of symbol. */
extern bool is_gimple_id (tree);
/* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */
extern bool is_gimple_min_lval (tree);
/* Returns true iff T is something whose address can be taken. */
extern bool is_gimple_addressable (tree);
/* Returns true iff T is any valid GIMPLE lvalue. */
extern bool is_gimple_lvalue (tree);
/* Returns true iff T is a GIMPLE address. */
bool is_gimple_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address. */
bool is_gimple_invariant_address (const_tree);
/* Returns true iff T is a GIMPLE invariant address at interprocedural
level. */
bool is_gimple_ip_invariant_address (const_tree);
/* Returns true iff T is a valid GIMPLE constant. */
bool is_gimple_constant (const_tree);
/* Returns true iff T is a GIMPLE restricted function invariant. */
extern bool is_gimple_min_invariant (const_tree);
/* Returns true iff T is a GIMPLE restricted interprocedural invariant. */
extern bool is_gimple_ip_invariant (const_tree);
/* Returns true iff T is a GIMPLE rvalue. */
extern bool is_gimple_val (tree);
/* Returns true iff T is a GIMPLE asm statement input. */
extern bool is_gimple_asm_val (tree);
/* Returns true iff T is a valid rhs for a MODIFY_EXPR where the LHS is a
GIMPLE temporary, a renamed user variable, or something else,
respectively. */
extern bool is_gimple_reg_rhs (tree);
extern bool is_gimple_mem_rhs (tree);
/* Returns true iff T is a valid if-statement condition. */
extern bool is_gimple_condexpr (tree);
/* Returns true iff T is a type conversion. */
extern bool is_gimple_cast (tree);
/* Returns true iff T is a variable that does not need to live in memory. */
extern bool is_gimple_non_addressable (tree t);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
/* If T makes a function call, returns the CALL_EXPR operand. */
extern tree get_call_expr_in (tree t);
extern void recalculate_side_effects (tree);
extern bool compare_field_offset (tree, tree);
extern tree gimple_register_type (tree);
extern void print_gimple_types_stats (void);
extern void free_gimple_type_tables (void);
extern tree gimple_unsigned_type (tree);
extern tree gimple_signed_type (tree);
extern alias_set_type gimple_get_alias_set (tree);
extern void count_uses_and_derefs (tree, gimple, unsigned *, unsigned *,
unsigned *);
extern bool walk_stmt_load_store_addr_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool walk_stmt_load_store_ops (gimple, void *,
bool (*)(gimple, tree, void *),
bool (*)(gimple, tree, void *));
extern bool gimple_ior_addresses_taken (bitmap, gimple);
/* In gimplify.c */
extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern tree get_initialized_tmp_var (tree, gimple_seq *, gimple_seq *);
extern tree get_formal_tmp_var (tree, gimple_seq *);
extern void declare_vars (tree, gimple, bool);
extern void annotate_all_with_location (gimple_seq, location_t);
/* Validation of GIMPLE expressions. Note that these predicates only check
the basic form of the expression, they don't recurse to make sure that
underlying nodes are also of the right form. */
typedef bool (*gimple_predicate)(tree);
/* FIXME we should deduce this from the predicate. */
enum fallback {
fb_none = 0, /* Do not generate a temporary. */
fb_rvalue = 1, /* Generate an rvalue to hold the result of a
gimplified expression. */
fb_lvalue = 2, /* Generate an lvalue to hold the result of a
gimplified expression. */
fb_mayfail = 4, /* Gimplification may fail. Error issued
afterwards. */
  fb_either = fb_rvalue | fb_lvalue
};
typedef int fallback_t;
enum gimplify_status {
GS_ERROR = -2, /* Something Bad Seen. */
GS_UNHANDLED = -1, /* A langhook result for "I dunno". */
GS_OK = 0, /* We did something, maybe more to do. */
GS_ALL_DONE = 1 /* The expression is fully gimplified. */
};
struct gimplify_ctx
{
struct gimplify_ctx *prev_context;
VEC(gimple,heap) *bind_expr_stack;
tree temps;
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
VEC(tree,heap) *case_labels;
/* The formal temporary table. Should this be persistent? */
htab_t temp_htab;
int conditions;
bool save_stack;
bool into_ssa;
bool allow_rhs_cond_expr;
};
extern enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *,
bool (*) (tree), fallback_t);
extern void gimplify_type_sizes (tree, gimple_seq *);
extern void gimplify_one_sizepos (tree *, gimple_seq *);
extern bool gimplify_stmt (tree *, gimple_seq *);
extern gimple gimplify_body (tree *, tree, bool);
extern void push_gimplify_context (struct gimplify_ctx *);
extern void pop_gimplify_context (gimple);
extern void gimplify_and_add (tree, gimple_seq *);
/* Miscellaneous helpers. */
extern void gimple_add_tmp_var (tree);
extern gimple gimple_current_bind_expr (void);
extern VEC(gimple, heap) *gimple_bind_expr_stack (void);
extern tree voidify_wrapper_expr (tree, tree);
extern tree build_and_jump (tree *);
extern tree alloc_stmt_list (void);
extern void free_stmt_list (tree);
extern tree force_labels_r (tree *, int *, void *);
extern enum gimplify_status gimplify_va_arg_expr (tree *, gimple_seq *,
gimple_seq *);
struct gimplify_omp_ctx;
extern void omp_firstprivatize_variable (struct gimplify_omp_ctx *, tree);
extern tree gimple_boolify (tree);
extern gimple_predicate rhs_predicate_for (tree);
extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c. */
extern tree omp_reduction_init (tree, tree);
/* In tree-nested.c. */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
/* In gimplify.c. */
extern void gimplify_function_tree (tree);
/* In cfgexpand.c. */
extern tree gimple_assign_rhs_to_tree (gimple);
/* In builtins.c */
extern bool validate_gimple_arglist (const_gimple, ...);
/* In tree-ssa.c */
extern bool tree_ssa_useless_type_conversion (tree);
extern tree tree_ssa_strip_useless_type_conversions (tree);
extern bool useless_type_conversion_p (tree, tree);
extern bool types_compatible_p (tree, tree);
/* Return the code for GIMPLE statement G. */
static inline enum gimple_code
gimple_code (const_gimple g)
{
return g->gsbase.code;
}
/* Return the GSS code used by a GIMPLE code. */
static inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
#ifdef ENABLE_CHECKING
gcc_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
#endif
return gss_for_code_[code];
}
/* Return which GSS code is used by GS. */
static inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple gs)
{
return gss_for_code (gimple_code (gs));
}
/* Return true if statement G has sub-statements. This is only true for
High GIMPLE statements. */
static inline bool
gimple_has_substatements (gimple g)
{
switch (gimple_code (g))
{
case GIMPLE_BIND:
case GIMPLE_CATCH:
case GIMPLE_EH_FILTER:
case GIMPLE_TRY:
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_WITH_CLEANUP_EXPR:
return true;
default:
return false;
}
}
/* Return the basic block holding statement G. */
static inline struct basic_block_def *
gimple_bb (const_gimple g)
{
return g->gsbase.bb;
}
/* Return the lexical scope block holding statement G. */
static inline tree
gimple_block (const_gimple g)
{
return g->gsbase.block;
}
/* Set BLOCK to be the lexical scope block holding statement G. */
static inline void
gimple_set_block (gimple g, tree block)
{
g->gsbase.block = block;
}
/* Return location information for statement G. */
static inline location_t
gimple_location (const_gimple g)
{
return g->gsbase.location;
}
/* Return pointer to location information for statement G. */
static inline const location_t *
gimple_location_ptr (const_gimple g)
{
return &g->gsbase.location;
}
/* Set location information for statement G. */
static inline void
gimple_set_location (gimple g, location_t location)
{
g->gsbase.location = location;
}
/* Return true if G contains location information. */
static inline bool
gimple_has_location (const_gimple g)
{
return gimple_location (g) != UNKNOWN_LOCATION;
}
/* Return the file name of the location of STMT. */
static inline const char *
gimple_filename (const_gimple stmt)
{
return LOCATION_FILE (gimple_location (stmt));
}
/* Return the line number of the location of STMT. */
static inline int
gimple_lineno (const_gimple stmt)
{
return LOCATION_LINE (gimple_location (stmt));
}
/* Determine whether SEQ is a singleton. */
static inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
return ((gimple_seq_first (seq) != NULL)
&& (gimple_seq_first (seq) == gimple_seq_last (seq)));
}
/* Return true if no warnings should be emitted for statement STMT. */
static inline bool
gimple_no_warning_p (const_gimple stmt)
{
return stmt->gsbase.no_warning;
}
/* Set the no_warning flag of STMT to NO_WARNING. */
static inline void
gimple_set_no_warning (gimple stmt, bool no_warning)
{
stmt->gsbase.no_warning = (unsigned) no_warning;
}
/* Set the visited status on statement STMT to VISITED_P. */
static inline void
gimple_set_visited (gimple stmt, bool visited_p)
{
stmt->gsbase.visited = (unsigned) visited_p;
}
/* Return the visited status for statement STMT. */
static inline bool
gimple_visited_p (gimple stmt)
{
return stmt->gsbase.visited;
}
/* Set pass local flag PLF on statement STMT to VAL_P. */
static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
if (val_p)
stmt->gsbase.plf |= (unsigned int) plf;
else
stmt->gsbase.plf &= ~((unsigned int) plf);
}
/* Return the value of pass local flag PLF on statement STMT. */
static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
return stmt->gsbase.plf & ((unsigned int) plf);
}
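/* Sketch of the intended pass-local-flag protocol: initialize the flag on
   entry to the pass, then use it freely:

     gimple_set_plf (stmt, GF_PLF_1, false);
     ...
     gimple_set_plf (stmt, GF_PLF_1, true);
     if (gimple_plf (stmt, GF_PLF_1))
       ...
*/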
/* Set the UID of statement. */
static inline void
gimple_set_uid (gimple g, unsigned uid)
{
g->gsbase.uid = uid;
}
/* Return the UID of statement. */
static inline unsigned
gimple_uid (const_gimple g)
{
return g->gsbase.uid;
}
/* Return true if GIMPLE statement G has register or memory operands. */
static inline bool
gimple_has_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return true if GIMPLE statement G has memory operands. */
static inline bool
gimple_has_mem_ops (const_gimple g)
{
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}
/* Return the set of DEF operands for statement G. */
static inline struct def_optype_d *
gimple_def_ops (const_gimple g)
{
if (!gimple_has_ops (g))
return NULL;
return g->gsops.opbase.def_ops;
}
/* Set DEF to be the set of DEF operands for statement G. */
static inline void
gimple_set_def_ops (gimple g, struct def_optype_d *def)
{
gcc_assert (gimple_has_ops (g));
g->gsops.opbase.def_ops = def;
}
/* Return the set of USE operands for statement G. */
static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
if (!gimple_has_ops (g))
return NULL;
return g->gsops.opbase.use_ops;
}
/* Set USE to be the set of USE operands for statement G. */
static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
gcc_assert (gimple_has_ops (g));
g->gsops.opbase.use_ops = use;
}
/* Return the set of VUSE operand for statement G. */
static inline use_operand_p
gimple_vuse_op (const_gimple g)
{
struct use_optype_d *ops;
if (!gimple_has_mem_ops (g))
return NULL_USE_OPERAND_P;
ops = g->gsops.opbase.use_ops;
if (ops
&& USE_OP_PTR (ops)->use == &g->gsmembase.vuse)
return USE_OP_PTR (ops);
return NULL_USE_OPERAND_P;
}
/* Return the set of VDEF operand for statement G. */
static inline def_operand_p
gimple_vdef_op (const_gimple g)
{
struct def_optype_d *ops;
if (!gimple_has_mem_ops (g))
return NULL_DEF_OPERAND_P;
ops = g->gsops.opbase.def_ops;
if (ops
&& DEF_OP_PTR (ops) == &g->gsmembase.vdef)
return DEF_OP_PTR (ops);
return NULL_DEF_OPERAND_P;
}
/* Return the single VUSE operand of the statement G. */
static inline tree
gimple_vuse (const_gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_TREE;
return g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G. */
static inline tree
gimple_vdef (const_gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL_TREE;
return g->gsmembase.vdef;
}
/* Return the single VUSE operand of the statement G. */
static inline tree *
gimple_vuse_ptr (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL;
return &g->gsmembase.vuse;
}
/* Return the single VDEF operand of the statement G. */
static inline tree *
gimple_vdef_ptr (gimple g)
{
if (!gimple_has_mem_ops (g))
return NULL;
return &g->gsmembase.vdef;
}
/* Set the single VUSE operand of the statement G. */
static inline void
gimple_set_vuse (gimple g, tree vuse)
{
gcc_assert (gimple_has_mem_ops (g));
g->gsmembase.vuse = vuse;
}
/* Set the single VDEF operand of the statement G. */
static inline void
gimple_set_vdef (gimple g, tree vdef)
{
gcc_assert (gimple_has_mem_ops (g));
g->gsmembase.vdef = vdef;
}
/* Return true if statement G has operands and the modified field has
been set. */
static inline bool
gimple_modified_p (const_gimple g)
{
return (gimple_has_ops (g)) ? (bool) g->gsbase.modified : false;
}
/* Return the tree code for the expression computed by STMT. This is
only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN. For
GIMPLE_CALL, return CALL_EXPR as the expression code for
consistency. This is useful when the caller needs to deal with the
three kinds of computation that GIMPLE supports. */
static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
return (enum tree_code) stmt->gsbase.subcode;
else if (code == GIMPLE_CALL)
return CALL_EXPR;
else
gcc_unreachable ();
}
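/* Example (sketch): dispatch on the computation without caring which
   tuple carries it:

     switch (gimple_expr_code (stmt))
       {
       case PLUS_EXPR:   -- addition on the RHS of a GIMPLE_ASSIGN
         ...
       case LT_EXPR:     -- a comparison, e.g. from a GIMPLE_COND
         ...
       case CALL_EXPR:   -- any GIMPLE_CALL
         ...
       default:
         break;
       }
*/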
/* Mark statement S as modified, and update it. */
static inline void
update_stmt (gimple s)
{
if (gimple_has_ops (s))
{
gimple_set_modified (s, true);
update_stmt_operands (s);
}
}
/* Update statement S if it has been optimized. */
static inline void
update_stmt_if_modified (gimple s)
{
if (gimple_modified_p (s))
update_stmt_operands (s);
}
/* Return true if statement STMT contains volatile operands. */
static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
if (gimple_has_mem_ops (stmt))
return stmt->gsbase.has_volatile_ops;
else
return false;
}
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */
static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
if (gimple_has_mem_ops (stmt))
stmt->gsbase.has_volatile_ops = (unsigned) volatilep;
}
/* Return true if statement STMT may access memory. */
static inline bool
gimple_references_memory_p (gimple stmt)
{
return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}
/* Return the subcode for OMP statement S. */
static inline unsigned
gimple_omp_subcode (const_gimple s)
{
gcc_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
&& gimple_code (s) <= GIMPLE_OMP_SINGLE);
return s->gsbase.subcode;
}
/* Set the subcode for OMP statement S to SUBCODE. */
static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
/* We only have 16 bits for the subcode. Assert that we are not
overflowing it. */
gcc_assert (subcode < (1 << 16));
s->gsbase.subcode = subcode;
}
/* Set the nowait flag on OMP_RETURN statement S. */
static inline void
gimple_omp_return_set_nowait (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
s->gsbase.subcode |= GF_OMP_RETURN_NOWAIT;
}
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
flag set. */
static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
flag set. */
static inline bool
gimple_omp_section_last_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}
/* Set the GF_OMP_SECTION_LAST flag on G. */
static inline void
gimple_omp_section_set_last (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
g->gsbase.subcode |= GF_OMP_SECTION_LAST;
}
/* Return true if OMP parallel statement G has the
GF_OMP_PARALLEL_COMBINED flag set. */
static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
value of COMBINED_P. */
static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
if (combined_p)
g->gsbase.subcode |= GF_OMP_PARALLEL_COMBINED;
else
g->gsbase.subcode &= ~GF_OMP_PARALLEL_COMBINED;
}
/* Return the number of operands for statement GS. */
static inline unsigned
gimple_num_ops (const_gimple gs)
{
return gs->gsbase.num_ops;
}
/* Set the number of operands for statement GS. */
static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
gs->gsbase.num_ops = num_ops;
}
/* Return the array of operands for statement GS. */
static inline tree *
gimple_ops (gimple gs)
{
size_t off;
/* All the tuples have their operand vector at the very bottom
of the structure. Note that those structures that do not
have an operand vector have a zero offset. */
off = gimple_ops_offset_[gimple_statement_structure (gs)];
gcc_assert (off != 0);
return (tree *) ((char *) gs + off);
}
/* Return operand I for statement GS. */
static inline tree
gimple_op (const_gimple gs, unsigned i)
{
if (gimple_has_ops (gs))
{
#ifdef ENABLE_CHECKING
gcc_assert (i < gimple_num_ops (gs));
#endif
return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
}
else
return NULL_TREE;
}
/* Return a pointer to operand I for statement GS. */
static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
if (gimple_has_ops (gs))
{
#ifdef ENABLE_CHECKING
gcc_assert (i < gimple_num_ops (gs));
#endif
return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
}
else
return NULL;
}
/* Set operand I of statement GS to OP. */
static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
gcc_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
/* Note. It may be tempting to assert that OP matches
is_gimple_operand, but that would be wrong. Different tuples
accept slightly different sets of tree operands. Each caller
should perform its own validation. */
gimple_ops (gs)[i] = op;
}
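/* Illustration only: dump every non-NULL operand of STMT through the
generic accessors above. "debug_stmt_ops" is a hypothetical helper and
assumes the tree pretty-printer's debug_generic_expr is visible at this
point in the header. */
static inline void
debug_stmt_ops (gimple stmt)
{
unsigned i;
for (i = 0; i < gimple_num_ops (stmt); i++)
if (gimple_op (stmt, i))
debug_generic_expr (gimple_op (stmt, i));
}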
/* Return true if GS is a GIMPLE_ASSIGN. */
static inline bool
is_gimple_assign (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_ASSIGN;
}
/* Determine if expression CODE is one of the valid expressions that can
be used on the RHS of GIMPLE assignments. */
static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}
/* Return the LHS of assignment statement GS. */
static inline tree
gimple_assign_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of assignment statement GS. */
static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of assignment statement GS. */
static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gimple_set_op (gs, 0, lhs);
/* Modula-3 July 2010 Jay Krell jay.krell@cornell.edu */
gcc_assert ((!lhs) || !VOID_TYPE_P ( TREE_TYPE (lhs)));
if (lhs && TREE_CODE (lhs) == SSA_NAME)
SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the first operand on the RHS of assignment statement GS. */
static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op (gs, 1);
}
/* Return a pointer to the first operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the first operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
/* Modula-3 July 2010 jay.krell@cornell.edu */
gcc_assert ((!rhs) || !VOID_TYPE_P ( TREE_TYPE (rhs)));
gimple_set_op (gs, 1, rhs);
}
/* Return the second operand on the RHS of assignment statement GS.
If GS does not have a second RHS operand, NULL is returned instead. */
static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
if (gimple_num_ops (gs) >= 3)
return gimple_op (gs, 2);
else
return NULL_TREE;
}
/* Return a pointer to the second operand on the RHS of assignment
statement GS. */
static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gimple_op_ptr (gs, 2);
}
/* Set RHS to be the second operand on the RHS of assignment statement GS. */
static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
/* Modula-3 July 2010 jay.krell@cornell.edu */
gcc_assert ((!rhs) || !VOID_TYPE_P ( TREE_TYPE (rhs)));
gimple_set_op (gs, 2, rhs);
}
/* Returns true if GS is a nontemporal move. */
static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
return gs->gsbase.nontemporal_move;
}
/* Sets nontemporal move flag of GS to NONTEMPORAL. */
static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
gs->gsbase.nontemporal_move = nontemporal;
}
/* Return the code of the expression computed on the RHS of assignment
statement GS. If the RHS is a single object, return the tree code
of that object. */
static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
enum tree_code code;
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
code = gimple_expr_code (gs);
if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
code = TREE_CODE (gimple_assign_rhs1 (gs));
return code;
}
/* Set CODE to be the code for the expression computed on the RHS of
assignment S. */
static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
GIMPLE_CHECK (s, GIMPLE_ASSIGN);
s->gsbase.subcode = code;
}
/* Return the gimple rhs class of the code of the expression computed on
the rhs of assignment statement GS.
This will never return GIMPLE_INVALID_RHS. */
static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}
/* Return true if S is a type-cast assignment. */
static inline bool
gimple_assign_cast_p (gimple s)
{
if (is_gimple_assign (s))
{
enum tree_code sc = gimple_assign_rhs_code (s);
return CONVERT_EXPR_CODE_P (sc)
|| sc == VIEW_CONVERT_EXPR
|| sc == FIX_TRUNC_EXPR;
}
return false;
}
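/* Sketch of how the assignment accessors compose ("gimple_assign_ssa_copy_p"
is a hypothetical helper, for illustration only): with a GIMPLE_SINGLE_RHS,
gimple_assign_rhs_code returns the tree code of the RHS object itself, so an
SSA-to-SSA copy 'x_1 = y_2' can be recognized as follows. */
static inline bool
gimple_assign_ssa_copy_p (gimple stmt)
{
return is_gimple_assign (stmt)
&& TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
&& gimple_assign_rhs_code (stmt) == SSA_NAME;
}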
/* Return true if GS is a GIMPLE_CALL. */
static inline bool
is_gimple_call (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_CALL;
}
/* Return the LHS of call statement GS. */
static inline tree
gimple_call_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 0);
}
/* Return a pointer to the LHS of call statement GS. */
static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of call statement GS. */
static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 0, lhs);
if (lhs && TREE_CODE (lhs) == SSA_NAME)
SSA_NAME_DEF_STMT (lhs) = gs;
}
/* Return the tree node representing the function called by call
statement GS. */
static inline tree
gimple_call_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 1);
}
/* Return a pointer to the tree node representing the function called by call
statement GS. */
static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 1);
}
/* Set FN to be the function called by call statement GS. */
static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 1, fn);
}
/* Set FNDECL to be the function called by call statement GS. */
static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}
/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
Otherwise return NULL. This function is analogous to
get_callee_fndecl in tree land. */
static inline tree
gimple_call_fndecl (const_gimple gs)
{
tree addr = gimple_call_fn (gs);
if (TREE_CODE (addr) == ADDR_EXPR)
return TREE_OPERAND (addr, 0);
return NULL_TREE;
}
/* Return the type returned by call statement GS. */
static inline tree
gimple_call_return_type (const_gimple gs)
{
tree fn = gimple_call_fn (gs);
tree type = TREE_TYPE (fn);
/* See through the pointer. */
type = TREE_TYPE (type);
/* The return type of the call is the TREE_TYPE of the callee's
function type. */
return TREE_TYPE (type);
}
/* Return the static chain for call statement GS. */
static inline tree
gimple_call_chain (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, 2);
}
/* Return a pointer to the static chain for call statement GS. */
static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, 2);
}
/* Set CHAIN to be the static chain for call statement GS. */
static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, 2, chain);
}
/* Return the number of arguments used by call statement GS. */
static inline unsigned
gimple_call_num_args (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_CALL);
num_ops = gimple_num_ops (gs);
return num_ops - 3;
}
/* Return the argument at position INDEX for call statement GS. */
static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op (gs, index + 3);
}
/* Return a pointer to the argument at position INDEX for call
statement GS. */
static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
return gimple_op_ptr (gs, index + 3);
}
/* Set ARG to be the argument at position INDEX for call statement GS. */
static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
GIMPLE_CHECK (gs, GIMPLE_CALL);
gimple_set_op (gs, index + 3, arg);
}
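/* Usage sketch ("gimple_call_num_ssa_args" is a hypothetical helper):
operand 0 of a GIMPLE_CALL is the LHS, operand 1 the callee and operand 2
the static chain, which is why gimple_call_num_args is gimple_num_ops
minus 3. Count how many arguments of GS are SSA names. */
static inline unsigned
gimple_call_num_ssa_args (const_gimple gs)
{
unsigned i, n = 0;
for (i = 0; i < gimple_call_num_args (gs); i++)
if (TREE_CODE (gimple_call_arg (gs, i)) == SSA_NAME)
n++;
return n;
}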
/* If TAIL_P is true, mark call statement S as being a tail call
(i.e., a call just before the exit of a function). These calls are
candidates for tail call optimization. */
static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (tail_p)
s->gsbase.subcode |= GF_CALL_TAILCALL;
else
s->gsbase.subcode &= ~GF_CALL_TAILCALL;
}
/* Return true if GIMPLE_CALL S is marked as a tail call. */
static inline bool
gimple_call_tail_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_TAILCALL) != 0;
}
/* Set the GF_CALL_CANNOT_INLINE flag on GIMPLE_CALL S to INLINABLE_P;
true means S must not be inlined. */
static inline void
gimple_call_set_cannot_inline (gimple s, bool inlinable_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (inlinable_p)
s->gsbase.subcode |= GF_CALL_CANNOT_INLINE;
else
s->gsbase.subcode &= ~GF_CALL_CANNOT_INLINE;
}
/* Return true if GIMPLE_CALL S cannot be inlined. */
static inline bool
gimple_call_cannot_inline_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_CANNOT_INLINE) != 0;
}
/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
slot optimization. This transformation uses the target of the call
expansion as the return slot for calls that return in memory. */
static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (return_slot_opt_p)
s->gsbase.subcode |= GF_CALL_RETURN_SLOT_OPT;
else
s->gsbase.subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}
/* Return true if S is marked for return slot optimization. */
static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
thunk to the thunked-to function. */
static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (from_thunk_p)
s->gsbase.subcode |= GF_CALL_FROM_THUNK;
else
s->gsbase.subcode &= ~GF_CALL_FROM_THUNK;
}
/* Return true if GIMPLE_CALL S is a jump from a thunk. */
static inline bool
gimple_call_from_thunk_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_FROM_THUNK) != 0;
}
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (pass_arg_pack_p)
s->gsbase.subcode |= GF_CALL_VA_ARG_PACK;
else
s->gsbase.subcode &= ~GF_CALL_VA_ARG_PACK;
}
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (s->gsbase.subcode & GF_CALL_VA_ARG_PACK) != 0;
}
/* Return true if S is a noreturn call. */
static inline bool
gimple_call_noreturn_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
even if the called function can throw in other cases. */
static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
if (nothrow_p)
s->gsbase.subcode |= GF_CALL_NOTHROW;
else
s->gsbase.subcode &= ~GF_CALL_NOTHROW;
}
/* Return true if S is a nothrow call. */
static inline bool
gimple_call_nothrow_p (gimple s)
{
GIMPLE_CHECK (s, GIMPLE_CALL);
return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */
static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
GIMPLE_CHECK (dest_call, GIMPLE_CALL);
GIMPLE_CHECK (orig_call, GIMPLE_CALL);
dest_call->gsbase.subcode = orig_call->gsbase.subcode;
}
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
non-NULL lhs. */
static inline bool
gimple_has_lhs (gimple stmt)
{
return (is_gimple_assign (stmt)
|| (is_gimple_call (stmt)
&& gimple_call_lhs (stmt) != NULL_TREE));
}
/* Return the code of the predicate computed by conditional statement GS. */
static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return (enum tree_code) gs->gsbase.subcode;
}
/* Set CODE to be the predicate code for the conditional statement GS. */
static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gs->gsbase.subcode = code;
}
/* Return the LHS of the predicate computed by conditional statement GS. */
static inline tree
gimple_cond_lhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 0);
}
/* Return the pointer to the LHS of the predicate computed by conditional
statement GS. */
static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op_ptr (gs, 0);
}
/* Set LHS to be the LHS operand of the predicate computed by
conditional statement GS. */
static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 0, lhs);
}
/* Return the RHS operand of the predicate computed by conditional GS. */
static inline tree
gimple_cond_rhs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 1);
}
/* Return the pointer to the RHS operand of the predicate computed by
conditional GS. */
static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op_ptr (gs, 1);
}
/* Set RHS to be the RHS operand of the predicate computed by
conditional statement GS. */
static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 1, rhs);
}
/* Return the label used by conditional statement GS when its
predicate evaluates to true. */
static inline tree
gimple_cond_true_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 2);
}
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to true. */
static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 2, label);
}
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to false. */
static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
gimple_set_op (gs, 3, label);
}
/* Return the label used by conditional statement GS when its
predicate evaluates to false. */
static inline tree
gimple_cond_false_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_COND);
return gimple_op (gs, 3);
}
/* Set the conditional GS to be of the always-false form 'if (1 == 0)'. */
static inline void
gimple_cond_make_false (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_false_node);
gs->gsbase.subcode = EQ_EXPR;
}
/* Set the conditional GS to be of the always-true form 'if (1 == 1)'. */
static inline void
gimple_cond_make_true (gimple gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
gimple_cond_set_rhs (gs, boolean_true_node);
gs->gsbase.subcode = EQ_EXPR;
}
/* Check if conditional statement GS is of the form 'if (1 == 1)',
'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)'. */
static inline bool
gimple_cond_true_p (const_gimple gs)
{
tree lhs = gimple_cond_lhs (gs);
tree rhs = gimple_cond_rhs (gs);
enum tree_code code = gimple_cond_code (gs);
if (lhs != boolean_true_node && lhs != boolean_false_node)
return false;
if (rhs != boolean_true_node && rhs != boolean_false_node)
return false;
if (code == NE_EXPR && lhs != rhs)
return true;
if (code == EQ_EXPR && lhs == rhs)
return true;
return false;
}
/* Check if conditional statement GS is of the form 'if (1 != 1)',
'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)'. */
static inline bool
gimple_cond_false_p (const_gimple gs)
{
tree lhs = gimple_cond_lhs (gs);
tree rhs = gimple_cond_rhs (gs);
enum tree_code code = gimple_cond_code (gs);
if (lhs != boolean_true_node && lhs != boolean_false_node)
return false;
if (rhs != boolean_true_node && rhs != boolean_false_node)
return false;
if (code == NE_EXPR && lhs == rhs)
return true;
if (code == EQ_EXPR && lhs != rhs)
return true;
return false;
}
/* Check if conditional statement GS is of the form 'if (var != 0)' or
'if (var == 1)'. */
static inline bool
gimple_cond_single_var_p (gimple gs)
{
if (gimple_cond_code (gs) == NE_EXPR
&& gimple_cond_rhs (gs) == boolean_false_node)
return true;
if (gimple_cond_code (gs) == EQ_EXPR
&& gimple_cond_rhs (gs) == boolean_true_node)
return true;
return false;
}
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */
static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs, tree rhs)
{
gimple_cond_set_code (stmt, code);
gimple_cond_set_lhs (stmt, lhs);
gimple_cond_set_rhs (stmt, rhs);
}
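/* Sketch of the usual mutate-then-update idiom
("gimple_cond_replace_condition" is a hypothetical helper): after
rewriting the predicate, mark the statement modified so its operand
caches are rebuilt. */
static inline void
gimple_cond_replace_condition (gimple cond, enum tree_code code,
tree lhs, tree rhs)
{
gimple_cond_set_condition (cond, code, lhs, rhs);
update_stmt (cond);
}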
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */
static inline tree
gimple_label_label (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_LABEL);
return gimple_op (gs, 0);
}
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
GS. */
static inline void
gimple_label_set_label (gimple gs, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_LABEL);
gimple_set_op (gs, 0, label);
}
/* Return the destination of the unconditional jump GS. */
static inline tree
gimple_goto_dest (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
return gimple_op (gs, 0);
}
/* Set DEST to be the destination of the unconditional jump GS. */
static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
gimple_set_op (gs, 0, dest);
}
/* Return the variables declared in the GIMPLE_BIND statement GS. */
static inline tree
gimple_bind_vars (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.vars;
}
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.vars = vars;
}
/* Append VARS to the set of variables declared in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.vars = chainon (gs->gimple_bind.vars, vars);
}
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
static inline gimple_seq
gimple_bind_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.body;
}
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gs->gimple_bind.body = seq;
}
/* Append a statement to the end of a GIMPLE_BIND's body. */
static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gimple_seq_add_stmt (&gs->gimple_bind.body, stmt);
}
/* Append a sequence of statements to the end of a GIMPLE_BIND's body. */
static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gimple_seq_add_seq (&gs->gimple_bind.body, seq);
}
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */
static inline tree
gimple_bind_block (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
return gs->gimple_bind.block;
}
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
statement GS. */
static inline void
gimple_bind_set_block (gimple gs, tree block)
{
GIMPLE_CHECK (gs, GIMPLE_BIND);
gcc_assert (block == NULL_TREE || TREE_CODE (block) == BLOCK);
gs->gimple_bind.block = block;
}
/* Return the number of input operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.ni;
}
/* Return the number of output operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.no;
}
/* Return the number of clobber operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nc;
}
/* Return the number of label operands for GIMPLE_ASM GS. */
static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.nl;
}
/* Return input operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.ni);
return gimple_op (gs, index);
}
/* Return a pointer to input operand INDEX of GIMPLE_ASM GS. */
static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.ni);
return gimple_op_ptr (gs, index);
}
/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.ni);
gcc_assert (TREE_CODE (in_op) == TREE_LIST);
gimple_set_op (gs, index, in_op);
}
/* Return output operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.no);
return gimple_op (gs, index + gs->gimple_asm.ni);
}
/* Return a pointer to output operand INDEX of GIMPLE_ASM GS. */
static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.no);
return gimple_op_ptr (gs, index + gs->gimple_asm.ni);
}
/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.no);
gcc_assert (TREE_CODE (out_op) == TREE_LIST);
gimple_set_op (gs, index + gs->gimple_asm.ni, out_op);
}
/* Return clobber operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_clobber_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nc);
return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no);
}
/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nc);
gcc_assert (TREE_CODE (clobber_op) == TREE_LIST);
gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.no, clobber_op);
}
/* Return label operand INDEX of GIMPLE_ASM GS. */
static inline tree
gimple_asm_label_op (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nl);
return gimple_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc);
}
/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */
static inline void
gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
gcc_assert (index < gs->gimple_asm.nl);
gcc_assert (TREE_CODE (label_op) == TREE_LIST);
gimple_set_op (gs, index + gs->gimple_asm.ni + gs->gimple_asm.nc, label_op);
}
/* Return the string representing the assembly instruction in
GIMPLE_ASM GS. */
static inline const char *
gimple_asm_string (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return gs->gimple_asm.string;
}
/* Return true if GS is an asm statement marked volatile. */
static inline bool
gimple_asm_volatile_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_VOLATILE) != 0;
}
/* If VOLATILE_P is true, mark asm statement GS as volatile. */
static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (volatile_p)
gs->gsbase.subcode |= GF_ASM_VOLATILE;
else
gs->gsbase.subcode &= ~GF_ASM_VOLATILE;
}
/* If INPUT_P is true, mark asm GS as an ASM_INPUT. */
static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
if (input_p)
gs->gsbase.subcode |= GF_ASM_INPUT;
else
gs->gsbase.subcode &= ~GF_ASM_INPUT;
}
/* Return true if asm GS is an ASM_INPUT. */
static inline bool
gimple_asm_input_p (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_ASM);
return (gs->gsbase.subcode & GF_ASM_INPUT) != 0;
}
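/* Exposition-only sketch ("gimple_asm_total_ops" is a hypothetical helper):
the operand vector of a GIMPLE_ASM stores the inputs first, then the
outputs, then the clobbers, as the INDEX offsets in the accessors above
show; summing the four counts gives the number of asm operands. */
static inline unsigned
gimple_asm_total_ops (const_gimple gs)
{
return gimple_asm_ninputs (gs) + gimple_asm_noutputs (gs)
+ gimple_asm_nclobbers (gs) + gimple_asm_nlabels (gs);
}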
/* Return the types handled by GIMPLE_CATCH statement GS. */
static inline tree
gimple_catch_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.types;
}
/* Return a pointer to the types handled by GIMPLE_CATCH statement GS. */
static inline tree *
gimple_catch_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.types;
}
/* Return the GIMPLE sequence representing the body of the handler of
GIMPLE_CATCH statement GS. */
static inline gimple_seq
gimple_catch_handler (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return gs->gimple_catch.handler;
}
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement GS. */
static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
return &gs->gimple_catch.handler;
}
/* Set T to be the set of types handled by GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_types (gimple gs, tree t)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.types = t;
}
/* Set HANDLER to be the body of GIMPLE_CATCH GS. */
static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
GIMPLE_CHECK (gs, GIMPLE_CATCH);
gs->gimple_catch.handler = handler;
}
/* Return the types handled by GIMPLE_EH_FILTER statement GS. */
static inline tree
gimple_eh_filter_types (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.types;
}
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
GS. */
static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return &gs->gimple_eh_filter.types;
}
/* Return the sequence of statements to execute when the
GIMPLE_EH_FILTER statement GS fails. */
static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
return gs->gimple_eh_filter.failure;
}
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.types = types;
}
/* Set FAILURE to be the sequence of statements to execute on failure
for GIMPLE_EH_FILTER GS. */
static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
GIMPLE_CHECK (gs, GIMPLE_EH_FILTER);
gs->gimple_eh_filter.failure = failure;
}
/* Get the function decl to be called by the MUST_NOT_THROW region. */
static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
return gs->gimple_eh_mnt.fndecl;
}
/* Set the function decl to be called by GS to DECL. */
static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
GIMPLE_CHECK (gs, GIMPLE_EH_MUST_NOT_THROW);
gs->gimple_eh_mnt.fndecl = decl;
}
/* GIMPLE_TRY accessors. */
/* Return the kind of try block represented by GIMPLE_TRY GS. This is
either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */
static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return (enum gimple_try_flags) (gs->gsbase.subcode & GIMPLE_TRY_KIND);
}
/* Set the kind of try block represented by GIMPLE_TRY GS. */
static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gcc_assert (kind == GIMPLE_TRY_CATCH || kind == GIMPLE_TRY_FINALLY);
if (gimple_try_kind (gs) != kind)
gs->gsbase.subcode = (unsigned int) kind;
}
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
gcc_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
return (gs->gsbase.subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}
/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_eval (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.eval;
}
/* Return the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
return gs->gimple_try.cleanup;
}
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
gcc_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
if (catch_is_cleanup)
g->gsbase.subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
else
g->gsbase.subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY GS. */
static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.eval = eval;
}
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY GS. */
static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
gs->gimple_try.cleanup = cleanup;
}
/* Return the cleanup sequence for cleanup statement GS. */
static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gimple_wce.cleanup;
}
/* Set CLEANUP to be the cleanup sequence for GS. */
static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gimple_wce.cleanup = cleanup;
}
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
return gs->gsbase.subcode != 0;
}
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
gs->gsbase.subcode = (unsigned int) eh_only_p;
}
/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.capacity;
}
/* Return the number of arguments in GIMPLE_PHI GS. This must always
be exactly the number of incoming edges for the basic block holding
GS. */
static inline unsigned
gimple_phi_num_args (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.nargs;
}
/* Return the SSA name created by GIMPLE_PHI GS. */
static inline tree
gimple_phi_result (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return gs->gimple_phi.result;
}
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
static inline tree *
gimple_phi_result_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
return &gs->gimple_phi.result;
}
/* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */
static inline void
gimple_phi_set_result (gimple gs, tree result)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gs->gimple_phi.result = result;
}
/* Return the PHI argument corresponding to incoming edge INDEX for
GIMPLE_PHI GS. */
static inline struct phi_arg_d *
gimple_phi_arg (gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gcc_assert (index < gs->gimple_phi.capacity);
return &(gs->gimple_phi.args[index]);
}
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
for GIMPLE_PHI GS. */
static inline void
gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg)
{
GIMPLE_CHECK (gs, GIMPLE_PHI);
gcc_assert (index < gs->gimple_phi.nargs);
memcpy (gs->gimple_phi.args + index, phiarg, sizeof (struct phi_arg_d));
}
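/* Illustrative sketch ("phi_args_all_equal_p" is a hypothetical helper):
true if every incoming argument of PHI is the tree DEF. Assumes struct
phi_arg_d, declared elsewhere, exposes its "def" field. */
static inline bool
phi_args_all_equal_p (gimple phi, tree def)
{
unsigned i;
for (i = 0; i < gimple_phi_num_args (phi); i++)
if (gimple_phi_arg (phi, i)->def != def)
return false;
return true;
}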
/* Return the region number for GIMPLE_RESX GS. */
static inline int
gimple_resx_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_RESX GS. */
static inline void
gimple_resx_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_RESX);
gs->gimple_eh_ctrl.region = region;
}
/* Return the region number for GIMPLE_EH_DISPATCH GS. */
static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
return gs->gimple_eh_ctrl.region;
}
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS. */
static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
GIMPLE_CHECK (gs, GIMPLE_EH_DISPATCH);
gs->gimple_eh_ctrl.region = region;
}
/* Return the number of labels associated with the switch statement GS. */
static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
unsigned num_ops;
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
num_ops = gimple_num_ops (gs);
gcc_assert (num_ops > 1);
return num_ops - 1;
}
/* Set NLABELS to be the number of labels for the switch statement GS. */
static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
gimple_set_num_ops (g, nlabels + 1);
}
/* Return the index variable used by the switch statement GS. */
static inline tree
gimple_switch_index (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op (gs, 0);
}
/* Return a pointer to the index variable for the switch statement GS. */
static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
return gimple_op_ptr (gs, 0);
}
/* Set INDEX to be the index variable for switch statement GS. */
static inline void
gimple_switch_set_index (gimple gs, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
gimple_set_op (gs, 0, index);
}
/* Return the label numbered INDEX of switch statement GS. Label 0 is
always the default label; the case labels follow it. */
static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (gimple_num_ops (gs) > index + 1);
return gimple_op (gs, index + 1);
}
/* Set the label number INDEX to LABEL. 0 is always the default label. */
static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
GIMPLE_CHECK (gs, GIMPLE_SWITCH);
gcc_assert (gimple_num_ops (gs) > index + 1);
gcc_assert (label == NULL_TREE || TREE_CODE (label) == CASE_LABEL_EXPR);
gimple_set_op (gs, index + 1, label);
}
/* Return the default label for a switch statement. */
static inline tree
gimple_switch_default_label (const_gimple gs)
{
return gimple_switch_label (gs, 0);
}
/* Set the default label for a switch statement. */
static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
gimple_switch_set_label (gs, 0, label);
}
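/* Usage sketch ("gimple_switch_first_case_label" is a hypothetical helper):
label 0 of a GIMPLE_SWITCH is always the default, so the case labels occupy
indices 1 through gimple_switch_num_labels - 1. Return the first case
label, or NULL_TREE if the switch only has a default. */
static inline tree
gimple_switch_first_case_label (const_gimple gs)
{
if (gimple_switch_num_labels (gs) <= 1)
return NULL_TREE;
return gimple_switch_label (gs, 1);
}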
/* Return true if GS is a GIMPLE_DEBUG statement. */
static inline bool
is_gimple_debug (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
}
/* Return true if S is a GIMPLE_DEBUG BIND statement. */
static inline bool
gimple_debug_bind_p (const_gimple s)
{
if (is_gimple_debug (s))
return s->gsbase.subcode == GIMPLE_DEBUG_BIND;
return false;
}
/* Return the variable bound in a GIMPLE_DEBUG bind statement. */
static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 0);
}
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 1);
}
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement. */
static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op_ptr (dbg, 1);
}
/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 0, var);
}
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 1, value);
}
/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
optimized away. */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value. */
static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
#ifdef ENABLE_CHECKING
gcc_assert (gimple_debug_bind_p (dbg));
#endif
return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}
#undef GIMPLE_DEBUG_BIND_NOVALUE
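/* Sketch ("gimple_debug_bind_maybe_reset" is a hypothetical helper):
degrade a debug bind to "no value" unless the bound value is a gimple
invariant. Assumes is_gimple_min_invariant, declared elsewhere in this
header, is visible here. */
static inline void
gimple_debug_bind_maybe_reset (gimple dbg)
{
if (gimple_debug_bind_p (dbg)
&& gimple_debug_bind_has_value_p (dbg)
&& !is_gimple_min_invariant (gimple_debug_bind_get_value (dbg)))
gimple_debug_bind_reset_value (dbg);
}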
/* Return the body for the OMP statement GS. */
static inline gimple_seq
gimple_omp_body (gimple gs)
{
return gs->omp.body;
}
/* Set BODY to be the body for the OMP statement GS. */
static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
gs->omp.body = body;
}
/* Return the name associated with OMP_CRITICAL statement GS. */
static inline tree
gimple_omp_critical_name (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return gs->gimple_omp_critical.name;
}
/* Return a pointer to the name associated with OMP critical statement GS. */
static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
return &gs->gimple_omp_critical.name;
}
/* Set NAME to be the name associated with OMP critical statement GS. */
static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_CRITICAL);
gs->gimple_omp_critical.name = name;
}
/* Return the clauses associated with OMP_FOR GS. */
static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.clauses;
}
/* Return a pointer to the clauses associated with OMP_FOR GS. */
static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return &gs->gimple_omp_for.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS. */
static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.clauses = clauses;
}
/* Get the collapse count of OMP_FOR GS. */
static inline size_t
gimple_omp_for_collapse (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.collapse;
}
/* Return the index variable for OMP_FOR GS. */
static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].index;
}
/* Return a pointer to the index variable for OMP_FOR GS. */
static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].index;
}
/* Set INDEX to be the index variable for OMP_FOR GS. */
static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].index = index;
}
/* Return the initial value for OMP_FOR GS. */
static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].initial;
}
/* Return a pointer to the initial value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].initial;
}
/* Set INITIAL to be the initial value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].initial = initial;
}
/* Return the final value for OMP_FOR GS. */
static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].final;
}
/* Return a pointer to the final value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].final;
}
/* Set FINAL to be the final value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].final = final;
}
/* Return the increment value for OMP_FOR GS. */
static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].incr;
}
/* Return a pointer to the increment value for OMP_FOR GS. */
static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return &gs->gimple_omp_for.iter[i].incr;
}
/* Set INCR to be the increment value for OMP_FOR GS. */
static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].incr = incr;
}
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
return gs->gimple_omp_for.pre_body;
}
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts. */
static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gs->gimple_omp_for.pre_body = pre_body;
}
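/* Sketch ("gimple_omp_for_complete_p" is a hypothetical helper): a
collapsed OMP_FOR keeps one index/initial/final/incr tuple per dimension
in iter[]; check that every dimension has been filled in. */
static inline bool
gimple_omp_for_complete_p (gimple gs)
{
size_t i;
for (i = 0; i < gimple_omp_for_collapse (gs); i++)
if (!gimple_omp_for_index (gs, i)
|| !gimple_omp_for_initial (gs, i)
|| !gimple_omp_for_final (gs, i)
|| !gimple_omp_for_incr (gs, i))
return false;
return true;
}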
/* Return the clauses associated with OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
GS. */
static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL GS. */
static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL GS. */
static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS. */
static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_PARALLEL);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_TASK GS. */
static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_TASK GS. */
static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_TASK GS. */
static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_TASK GS. */
static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the clauses associated with OMP_PARALLEL or OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.clauses;
}
/* Return a pointer to the clauses associated with OMP_PARALLEL or
OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.clauses;
}
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL
or OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.clauses = clauses;
}
/* Return the child function used to hold the body of OMP_PARALLEL or
OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.child_fn;
}
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL or OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.child_fn;
}
/* Set CHILD_FN to be the child function for OMP_PARALLEL or OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.child_fn = child_fn;
}
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL or OMP_TASK GS. */
static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_parallel.data_arg;
}
/* Return a pointer to the data argument for OMP_PARALLEL or OMP_TASK GS. */
static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_parallel.data_arg;
}
/* Set DATA_ARG to be the data argument for OMP_PARALLEL or OMP_TASK GS. */
static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
if (gimple_code (gs) != GIMPLE_OMP_PARALLEL)
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_parallel.data_arg = data_arg;
}
/* Return the copy function of OMP_TASK GS. */
static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.copy_fn;
}
/* Return a pointer to the copy function of OMP_TASK GS. */
static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.copy_fn;
}
/* Set COPY_FN to be the copy function for OMP_TASK GS. */
static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.copy_fn = copy_fn;
}
/* Return the size in bytes of the data block of OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_size;
}
/* Return a pointer to the data block size for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_size;
}
/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_size = arg_size;
}
/* Return the alignment in bytes of the data block of OMP_TASK GS. */
static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return gs->gimple_omp_task.arg_align;
}
/* Return a pointer to the data block align for OMP_TASK GS. */
static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
return &gs->gimple_omp_task.arg_align;
}
/* Set ARG_ALIGN to be the data block alignment for OMP_TASK GS. */
static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASK);
gs->gimple_omp_task.arg_align = arg_align;
}
/* Return the clauses associated with OMP_SINGLE GS. */
static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return gs->gimple_omp_single.clauses;
}
/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
return &gs->gimple_omp_single.clauses;
}
/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS. */
static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SINGLE);
gs->gimple_omp_single.clauses = clauses;
}
/* Return the clauses associated with OMP_SECTIONS GS. */
static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.clauses;
}
/* Return a pointer to the clauses associated with OMP_SECTIONS GS. */
static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.clauses;
}
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
GS. */
static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.clauses = clauses;
}
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
in GS. */
static inline tree
gimple_omp_sections_control (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return gs->gimple_omp_sections.control;
}
/* Return a pointer to the control variable associated with the
GIMPLE_OMP_SECTIONS in GS. */
static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
return &gs->gimple_omp_sections.control;
}
/* Set CONTROL to be the control variable associated with the
GIMPLE_OMP_SECTIONS in GS. */
static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SECTIONS);
gs->gimple_omp_sections.control = control;
}
/* Set COND to be the condition code for OMP_FOR GS. */
static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (TREE_CODE_CLASS (cond) == tcc_comparison);
gcc_assert (i < gs->gimple_omp_for.collapse);
gs->gimple_omp_for.iter[i].cond = cond;
}
/* Return the condition code associated with OMP_FOR GS. */
static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_FOR);
gcc_assert (i < gs->gimple_omp_for.collapse);
return gs->gimple_omp_for.iter[i].cond;
}
/* Set the value being stored in an atomic store. */
static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
g->gimple_omp_atomic_store.val = val;
}
/* Return the value being stored in an atomic store. */
static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return g->gimple_omp_atomic_store.val;
}
/* Return a pointer to the value being stored in an atomic store. */
static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
return &g->gimple_omp_atomic_store.val;
}
/* Set the LHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.lhs = lhs;
}
/* Get the LHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.lhs;
}
/* Return a pointer to the LHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.lhs;
}
/* Set the RHS of an atomic load. */
static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
g->gimple_omp_atomic_load.rhs = rhs;
}
/* Get the RHS of an atomic load. */
static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return g->gimple_omp_atomic_load.rhs;
}
/* Return a pointer to the RHS of an atomic load. */
static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_LOAD);
return &g->gimple_omp_atomic_load.rhs;
}
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_def;
}
/* The same as above, but return the address. */
static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_def;
}
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_def = def;
}
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return g->gimple_omp_continue.control_use;
}
/* The same as above, but return the address. */
static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
return &g->gimple_omp_continue.control_use;
}
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */
static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
GIMPLE_CHECK (g, GIMPLE_OMP_CONTINUE);
g->gimple_omp_continue.control_use = use;
}
/* Return a pointer to the return value for GIMPLE_RETURN GS. */
static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op_ptr (gs, 0);
}
/* Return the return value for GIMPLE_RETURN GS. */
static inline tree
gimple_return_retval (const_gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
return gimple_op (gs, 0);
}
/* Set RETVAL to be the return value for GIMPLE_RETURN GS. */
static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
GIMPLE_CHECK (gs, GIMPLE_RETURN);
gimple_set_op (gs, 0, retval);
}
/* Returns true when the gimple statement STMT is any of the OpenMP types. */
#define CASE_GIMPLE_OMP \
case GIMPLE_OMP_PARALLEL: \
case GIMPLE_OMP_TASK: \
case GIMPLE_OMP_FOR: \
case GIMPLE_OMP_SECTIONS: \
case GIMPLE_OMP_SECTIONS_SWITCH: \
case GIMPLE_OMP_SINGLE: \
case GIMPLE_OMP_SECTION: \
case GIMPLE_OMP_MASTER: \
case GIMPLE_OMP_ORDERED: \
case GIMPLE_OMP_CRITICAL: \
case GIMPLE_OMP_RETURN: \
case GIMPLE_OMP_ATOMIC_LOAD: \
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
static inline bool
is_gimple_omp (const_gimple stmt)
{
switch (gimple_code (stmt))
{
CASE_GIMPLE_OMP:
return true;
default:
return false;
}
}
/* Returns TRUE if statement G is a GIMPLE_NOP. */
static inline bool
gimple_nop_p (const_gimple g)
{
return gimple_code (g) == GIMPLE_NOP;
}
/* Return true if GS is a GIMPLE_RESX. */
static inline bool
is_gimple_resx (const_gimple gs)
{
return gimple_code (gs) == GIMPLE_RESX;
}
/* Return the predictor of GIMPLE_PREDICT statement GS. */
static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (enum br_predictor) (gs->gsbase.subcode & ~GF_PREDICT_TAKEN);
}
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */
static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
gs->gsbase.subcode = (gs->gsbase.subcode & GF_PREDICT_TAKEN)
| (unsigned) predictor;
}
/* Return the outcome of GIMPLE_PREDICT statement GS. */
static inline enum prediction
gimple_predict_outcome (gimple gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
return (gs->gsbase.subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */
static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
if (outcome == TAKEN)
gs->gsbase.subcode |= GF_PREDICT_TAKEN;
else
gs->gsbase.subcode &= ~GF_PREDICT_TAKEN;
}
/* Return the type of the main expression computed by STMT. Return
void_type_node if the statement computes nothing. */
static inline tree
gimple_expr_type (const_gimple stmt)
{
enum gimple_code code = gimple_code (stmt);
if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
{
tree type;
/* In general we want to pass out a type that can be substituted
for both the RHS and the LHS types if there is a possibly
useless conversion involved. That means returning the
original RHS type as far as we can reconstruct it. */
if (code == GIMPLE_CALL)
type = gimple_call_return_type (stmt);
else
switch (gimple_assign_rhs_code (stmt))
{
case POINTER_PLUS_EXPR:
type = TREE_TYPE (gimple_assign_rhs1 (stmt));
break;
default:
/* As fallback use the type of the LHS. */
type = TREE_TYPE (gimple_get_lhs (stmt));
break;
}
return type;
}
else if (code == GIMPLE_COND)
return boolean_type_node;
else
return void_type_node;
}
/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */
static inline gimple_stmt_iterator
gsi_start (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the first statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_first (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */
static inline gimple_stmt_iterator
gsi_last (gimple_seq seq)
{
gimple_stmt_iterator i;
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = (i.ptr && i.ptr->stmt) ? gimple_bb (i.ptr->stmt) : NULL;
return i;
}
/* Return a new iterator pointing to the last statement in basic block BB. */
static inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
gimple_seq seq;
seq = bb_seq (bb);
i.ptr = gimple_seq_last (seq);
i.seq = seq;
i.bb = bb;
return i;
}
/* Return true if I is at the end of its sequence. */
static inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if I is one statement before the end of its sequence. */
static inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->next == NULL;
}
/* Advance the iterator to the next gimple statement. */
static inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->next;
}
/* Advance the iterator to the previous gimple statement. */
static inline void
gsi_prev (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->prev;
}
/* Return the current stmt. */
static inline gimple
gsi_stmt (gimple_stmt_iterator i)
{
return i.ptr->stmt;
}
/* Return a block statement iterator that points to the first non-label
statement in block BB. */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
gsi_next (&gsi);
return gsi;
}
/* Advance the iterator to the next non-debug gimple statement. */
static inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_next (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Advance the iterator to the previous non-debug gimple statement. */
static inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
do
{
gsi_prev (i);
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
/* Return a new iterator pointing to the first non-debug statement in
basic block BB. */
static inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_next_nondebug (&i);
return i;
}
/* Return a new iterator pointing to the last non-debug statement in
basic block BB. */
static inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_bb (bb);
if (!gsi_end_p (i) && is_gimple_debug (gsi_stmt (i)))
gsi_prev_nondebug (&i);
return i;
}
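/* Example (illustrative sketch, not part of the original header): the
   accessors above compose into the canonical statement-walking idiom.
   Count the non-debug statements in basic block BB. */
static inline unsigned
gsi_example_count_nondebug_stmts (basic_block bb)
{
  unsigned n = 0;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    if (!is_gimple_debug (gsi_stmt (gsi)))
      n++;   /* gsi_stmt yields the statement at the cursor */
  return n;
}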
/* Return a pointer to the current stmt.
NOTE: You may want to use gsi_replace on the iterator itself,
as this performs additional bookkeeping that will not be done
if you simply assign through a pointer returned by gsi_stmt_ptr. */
static inline gimple *
gsi_stmt_ptr (gimple_stmt_iterator *i)
{
return &i->ptr->stmt;
}
/* Return the basic block associated with this iterator. */
static inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
return i.bb;
}
/* Return the sequence associated with this iterator. */
static inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
return i.seq;
}
enum gsi_iterator_update
{
GSI_NEW_STMT, /* Only valid when single statement is added, move
iterator to it. */
GSI_SAME_STMT, /* Leave the iterator at the same statement. */
GSI_CONTINUE_LINKING /* Move iterator to whatever position is suitable
for linking other statements in the same
direction. */
};
/* In gimple-iterator.c */
gimple_stmt_iterator gsi_start_phis (basic_block);
gimple_seq gsi_split_seq_after (gimple_stmt_iterator);
gimple_seq gsi_split_seq_before (gimple_stmt_iterator *);
void gsi_replace (gimple_stmt_iterator *, gimple, bool);
void gsi_insert_before (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_before_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_before (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_before_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_after (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_after_without_update (gimple_stmt_iterator *, gimple,
enum gsi_iterator_update);
void gsi_insert_seq_after (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_insert_seq_after_without_update (gimple_stmt_iterator *, gimple_seq,
enum gsi_iterator_update);
void gsi_remove (gimple_stmt_iterator *, bool);
gimple_stmt_iterator gsi_for_stmt (gimple);
void gsi_move_after (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_before (gimple_stmt_iterator *, gimple_stmt_iterator *);
void gsi_move_to_bb_end (gimple_stmt_iterator *, struct basic_block_def *);
void gsi_insert_on_edge (edge, gimple);
void gsi_insert_seq_on_edge (edge, gimple_seq);
basic_block gsi_insert_on_edge_immediate (edge, gimple);
basic_block gsi_insert_seq_on_edge_immediate (edge, gimple_seq);
void gsi_commit_one_edge_insert (edge, basic_block *);
void gsi_commit_edge_inserts (void);
gimple gimple_call_copy_skip_args (gimple, bitmap);
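/* Example (sketch, not from the original sources): insert STMT
   immediately before the statement at *GSI, leaving the iterator
   where it was; passing GSI_NEW_STMT instead would move the iterator
   to the newly inserted statement. */
static inline void
gsi_example_insert_before (gimple_stmt_iterator *gsi, gimple stmt)
{
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
}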
/* Convenience routines to walk all statements of a gimple function.
Note that this is useful exclusively before the code is converted
into SSA form. Once the program is in SSA form, the standard
operand interface should be used to analyze/modify statements. */
struct walk_stmt_info
{
/* Points to the current statement being walked. */
gimple_stmt_iterator gsi;
/* Additional data that the callback functions may want to carry
through the recursion. */
void *info;
/* Pointer map used to mark visited tree nodes when calling
walk_tree on each operand. If set to NULL, duplicate tree nodes
will be visited more than once. */
struct pointer_set_t *pset;
/* Indicates whether the operand being examined may be replaced
with something that matches is_gimple_val (if true) or something
slightly more complicated (if false). "Something" technically
means the common subset of is_gimple_lvalue and is_gimple_rhs,
but we never try to form anything more complicated than that, so
we don't bother checking.
Also note that CALLBACK should update this flag while walking the
sub-expressions of a statement. For instance, when walking the
statement 'foo (&var)', the flag VAL_ONLY will initially be set
to true, however, when walking &var, the operand of that
ADDR_EXPR does not need to be a GIMPLE value. */
bool val_only;
/* True if we are currently walking the LHS of an assignment. */
bool is_lhs;
/* Optional. Set to true by the callback functions if they made any
changes. */
bool changed;
/* True if we're interested in location information. */
bool want_locations;
/* Operand returned by the callbacks. This is set when calling
walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
returns non-NULL, this field will contain the tree returned by
the last callback. */
tree callback_result;
};
/* Callback for walk_gimple_stmt. Called for every statement found
during traversal. The first argument points to the statement to
walk. The second argument is a flag that the callback sets to
'true' if the callback handled all the operands and
sub-statements of the statement (the default value of this flag is
'false'). The third argument is an anonymous pointer to data
to be used by the callback. */
typedef tree (*walk_stmt_fn) (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
gimple walk_gimple_seq (gimple_seq, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_stmt (gimple_stmt_iterator *, walk_stmt_fn, walk_tree_fn,
struct walk_stmt_info *);
tree walk_gimple_op (gimple, walk_tree_fn, struct walk_stmt_info *);
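/* Example (sketch): a minimal walk_stmt_fn callback that counts the
   statements it visits.  The caller is assumed to have stored the
   address of an unsigned counter in WI->info before starting the
   walk; returning NULL_TREE keeps the traversal going. */
static inline tree
example_count_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                    struct walk_stmt_info *wi)
{
  (void) gsi;                     /* statement itself not inspected */
  *handled_ops_p = false;         /* still walk operands and bodies */
  ++*(unsigned *) wi->info;
  return NULL_TREE;
}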
#ifdef GATHER_STATISTICS
/* Enum and arrays used for allocation stats. Keep in sync with
gimple.c:gimple_alloc_kind_names. */
enum gimple_alloc_kind
{
gimple_alloc_kind_assign, /* Assignments. */
gimple_alloc_kind_phi, /* PHI nodes. */
gimple_alloc_kind_cond, /* Conditionals. */
gimple_alloc_kind_seq, /* Sequences. */
gimple_alloc_kind_rest, /* Everything else. */
gimple_alloc_kind_all
};
extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];
/* Return the allocation kind for a given stmt CODE. */
static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
switch (code)
{
case GIMPLE_ASSIGN:
return gimple_alloc_kind_assign;
case GIMPLE_PHI:
return gimple_alloc_kind_phi;
case GIMPLE_COND:
return gimple_alloc_kind_cond;
default:
return gimple_alloc_kind_rest;
}
}
#endif /* GATHER_STATISTICS */
extern void dump_gimple_statistics (void);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* GCC_GIMPLE_H */
|
scale_channels_layer.c | #include "scale_channels_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>
layer make_scale_channels_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
fprintf(stderr,"scale Layer: %d\n", index);
layer l = { (LAYER_TYPE)0 };
l.type = SCALE_CHANNELS;
l.batch = batch;
l.w = w;
l.h = h;
l.c = c;
assert(w == 1 && h == 1);
l.out_w = w2;
l.out_h = h2;
l.out_c = c2;
assert(l.out_c == l.c);
l.outputs = l.out_w*l.out_h*l.out_c;
l.inputs = l.outputs;
l.index = index;
l.delta = (float*)calloc(l.outputs * batch, sizeof(float));
l.output = (float*)calloc(l.outputs * batch, sizeof(float));
l.forward = forward_scale_channels_layer;
l.backward = backward_scale_channels_layer;
#ifdef GPU
l.forward_gpu = forward_scale_channels_layer_gpu;
l.backward_gpu = backward_scale_channels_layer_gpu;
l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
return l;
}
void resize_scale_channels_layer(layer *l, network *net)
{
layer first = net->layers[l->index];
l->out_w = first.out_w;
l->out_h = first.out_h;
l->outputs = l->out_w*l->out_h*l->out_c;
l->inputs = l->outputs;
l->delta = (float*)realloc(l->delta, l->outputs * l->batch * sizeof(float));
l->output = (float*)realloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
cuda_free(l->output_gpu);
cuda_free(l->delta_gpu);
l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}
void forward_scale_channels_layer(const layer l, network_state state)
{
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = l.out_w * l.out_h;
float *from_output = state.net.layers[l.index].output;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
l.output[i] = state.input[i / channel_size] * from_output[i];
}
activate_array(l.output, l.outputs*l.batch, l.activation);
}
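/* Shape note (sketch): because w == 1 and h == 1 are asserted in
   make_scale_channels_layer, state.input holds one scalar per
   (batch, channel) pair, and i / channel_size maps every output
   element back to its scale, i.e.
       out[b][c][y][x] = scale[b][c] * from[b][c][y][x]  */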
void backward_scale_channels_layer(const layer l, network_state state)
{
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
//axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
//scale_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = l.out_w * l.out_h;
float *from_output = state.net.layers[l.index].output;
float *from_delta = state.net.layers[l.index].delta;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
state.delta[i / channel_size] += l.delta[i] * from_output[i] / channel_size; // l.delta * from (should be divided by channel_size?)
from_delta[i] += state.input[i / channel_size] * l.delta[i]; // input * l.delta
}
}
#ifdef GPU
void forward_scale_channels_layer_gpu(const layer l, network_state state)
{
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = l.out_w * l.out_h;
scale_channels_gpu(state.net.layers[l.index].output_gpu, size, channel_size, state.input, l.output_gpu);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
void backward_scale_channels_layer_gpu(const layer l, network_state state)
{
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = l.out_w * l.out_h;
float *from_output = state.net.layers[l.index].output_gpu;
float *from_delta = state.net.layers[l.index].delta_gpu;
backward_scale_channels_gpu(l.delta_gpu, size, channel_size, state.input, from_delta, from_output, state.delta);
}
#endif
|
GB_binop__isge_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale): GB (_AxD__isge_uint64)
// D*A function (rowscale): GB (_DxB__isge_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B GB (_bind1st__isge_uint64)
// C=scalar+B' GB (_bind1st_tran__isge_uint64)
// C=A+scalar GB (_bind2nd__isge_uint64)
// C=A'+scalar GB (_bind2nd_tran__isge_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
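// Illustration only (not part of the generated kernel): ISGE returns
// the boolean result in the uint64_t domain, so expanding
// GB_BINOP (z, 3, 5, i, j) gives z = (3 >= 5), i.e. z == 0,
// while GB_BINOP (z, 5, 3, i, j) gives z == 1.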
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
utils.c | #ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "utils.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#include <assert.h>
#include <float.h>
#include <limits.h>
#include "darkunistd.h"
#include <libgen.h>
#ifdef WIN32
#include "gettimeofday.h"
#else
#include <sys/time.h>
#include <sys/stat.h>
#endif
#ifndef USE_CMAKE_LIBS
#pragma warning(disable: 4996)
#endif
void *xmalloc(size_t size) {
void *ptr=malloc(size);
if(!ptr) {
malloc_error();
}
return ptr;
}
void *xcalloc(size_t nmemb, size_t size) {
void *ptr=calloc(nmemb,size);
if(!ptr) {
calloc_error();
}
return ptr;
}
void *xrealloc(void *ptr, size_t size) {
ptr=realloc(ptr,size);
if(!ptr) {
realloc_error();
}
return ptr;
}
double what_time_is_it_now()
{
struct timeval time;
if (gettimeofday(&time, NULL)) {
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
int *read_map(char *filename)
{
int n = 0;
int *map = 0;
char *str;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
while((str=fgetl(file))){
++n;
map = (int*)xrealloc(map, n * sizeof(int));
map[n-1] = atoi(str);
free(str);
}
if (file) fclose(file);
return map;
}
void sorta_shuffle(void *arr, size_t n, size_t size, size_t sections)
{
size_t i;
for(i = 0; i < sections; ++i){
size_t start = n*i/sections;
size_t end = n*(i+1)/sections;
size_t num = end-start;
shuffle((char*)arr+(start*size), num, size);
}
}
void shuffle(void *arr, size_t n, size_t size)
{
size_t i;
void* swp = (void*)xcalloc(1, size);
for(i = 0; i < n-1; ++i){
size_t j = i + random_gen()/(RAND_MAX / (n-i)+1);
memcpy(swp, (char*)arr+(j*size), size);
memcpy((char*)arr+(j*size), (char*)arr+(i*size), size);
memcpy((char*)arr+(i*size), swp, size);
}
free(swp);
}
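/* Usage sketch (hypothetical caller): in-place Fisher-Yates shuffle
   of an int array.
       int vals[5] = {1, 2, 3, 4, 5};
       shuffle(vals, 5, sizeof(int));  */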
void del_arg(int argc, char **argv, int index)
{
int i;
for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
argv[i] = 0;
}
int find_arg(int argc, char* argv[], char *arg)
{
int i;
for(i = 0; i < argc; ++i) {
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)) {
del_arg(argc, argv, i);
return 1;
}
}
return 0;
}
int find_int_arg(int argc, char **argv, char *arg, int def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atoi(argv[i+1]);
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
float find_float_arg(int argc, char **argv, char *arg, float def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = atof(argv[i+1]);
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
int i;
for(i = 0; i < argc-1; ++i){
if(!argv[i]) continue;
if(0==strcmp(argv[i], arg)){
def = argv[i+1];
del_arg(argc, argv, i);
del_arg(argc, argv, i);
break;
}
}
return def;
}
char *basecfg(char *cfgfile)
{
char *c = cfgfile;
char *next;
while((next = strchr(c, '/')))
{
c = next+1;
}
if(!next) while ((next = strchr(c, '\\'))) { c = next + 1; }
c = copy_string(c);
next = strchr(c, '.');
if (next) *next = 0;
return c;
}
int alphanum_to_int(char c)
{
return (c < 58) ? c - 48 : c-87;
}
char int_to_alphanum(int i)
{
if (i == 36) return '.';
return (i < 10) ? i + 48 : i + 87;
}
void pm(int M, int N, float *A)
{
int i,j;
for(i =0 ; i < M; ++i){
printf("%d ", i+1);
for(j = 0; j < N; ++j){
printf("%2.4f, ", A[i*N+j]);
}
printf("\n");
}
printf("\n");
}
void find_replace(const char* str, char* orig, char* rep, char* output)
{
char* buffer = (char*)calloc(8192, sizeof(char));
char *p;
sprintf(buffer, "%s", str);
if (!(p = strstr(buffer, orig))) { // Is 'orig' even in 'str'?
sprintf(output, "%s", buffer);
free(buffer);
return;
}
*p = '\0';
sprintf(output, "%s%s%s", buffer, rep, p + strlen(orig));
free(buffer);
}
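/* Usage sketch (hypothetical paths): only the first occurrence of
   'orig' is replaced.
       char out[256];
       find_replace("data/images/dog.jpg", "images", "labels", out);
       // out == "data/labels/dog.jpg"  */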
void trim(char *str)
{
char* buffer = (char*)xcalloc(8192, sizeof(char));
sprintf(buffer, "%s", str);
char *p = buffer;
while (*p == ' ' || *p == '\t') ++p;
char *end = p + strlen(p) - 1;
while (*end == ' ' || *end == '\t') {
*end = '\0';
--end;
}
sprintf(str, "%s", p);
free(buffer);
}
void find_replace_extension(char *str, char *orig, char *rep, char *output)
{
char* buffer = (char*)calloc(8192, sizeof(char));
sprintf(buffer, "%s", str);
    char *p = strstr(buffer, orig);
    // Check p before doing pointer arithmetic on it: 'orig' must be in
    // 'str' AND must sit exactly at the end of 'str'.
    if (!p || strlen(buffer) - (size_t)(p - buffer) != strlen(orig)) {
sprintf(output, "%s", buffer);
free(buffer);
return;
}
*p = '\0';
sprintf(output, "%s%s%s", buffer, rep, p + strlen(orig));
free(buffer);
}
void replace_image_to_label(const char* input_path, char* output_path)
{
find_replace(input_path, "/images/train2014/", "/labels/train2014/", output_path); // COCO
find_replace(output_path, "/images/val2014/", "/labels/val2014/", output_path); // COCO
find_replace(output_path, "/JPEGImages/", "/labels/", output_path); // PascalVOC
find_replace(output_path, "\\images\\train2014\\", "\\labels\\train2014\\", output_path); // COCO
find_replace(output_path, "\\images\\val2014\\", "\\labels\\val2014\\", output_path); // COCO
find_replace(output_path, "\\JPEGImages\\", "\\labels\\", output_path); // PascalVOC
//find_replace(output_path, "/images/", "/labels/", output_path); // COCO
//find_replace(output_path, "/VOC2007/JPEGImages/", "/VOC2007/labels/", output_path); // PascalVOC
//find_replace(output_path, "/VOC2012/JPEGImages/", "/VOC2012/labels/", output_path); // PascalVOC
//find_replace(output_path, "/raw/", "/labels/", output_path);
trim(output_path);
// replace only ext of files
find_replace_extension(output_path, ".jpg", ".txt", output_path);
find_replace_extension(output_path, ".JPG", ".txt", output_path); // error
find_replace_extension(output_path, ".jpeg", ".txt", output_path);
find_replace_extension(output_path, ".JPEG", ".txt", output_path);
find_replace_extension(output_path, ".png", ".txt", output_path);
find_replace_extension(output_path, ".PNG", ".txt", output_path);
find_replace_extension(output_path, ".bmp", ".txt", output_path);
find_replace_extension(output_path, ".BMP", ".txt", output_path);
find_replace_extension(output_path, ".ppm", ".txt", output_path);
find_replace_extension(output_path, ".PPM", ".txt", output_path);
find_replace_extension(output_path, ".tiff", ".txt", output_path);
find_replace_extension(output_path, ".TIFF", ".txt", output_path);
// Check file ends with txt:
if(strlen(output_path) > 4) {
char *output_path_ext = output_path + strlen(output_path) - 4;
if( strcmp(".txt", output_path_ext) != 0){
fprintf(stderr, "Failed to infer label file name (check image extension is supported): %s \n", output_path);
}
}else{
fprintf(stderr, "Label file name is too short: %s \n", output_path);
}
}
float sec(clock_t clocks)
{
return (float)clocks/CLOCKS_PER_SEC;
}
void top_k(float *a, int n, int k, int *index)
{
int i,j;
for(j = 0; j < k; ++j) index[j] = -1;
for(i = 0; i < n; ++i){
int curr = i;
for(j = 0; j < k; ++j){
if((index[j] < 0) || a[curr] > a[index[j]]){
int swap = curr;
curr = index[j];
index[j] = swap;
}
}
}
}
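/* Usage sketch: with a = {0.1, 0.9, 0.4}, n = 3, k = 2,
       int idx[2];
       top_k(a, 3, 2, idx);
   leaves idx = {1, 2}: the indices of the two largest values,
   ordered from largest to smallest.  */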
void error(const char *s)
{
perror(s);
assert(0);
exit(EXIT_FAILURE);
}
void malloc_error()
{
fprintf(stderr, "xMalloc error\n");
exit(EXIT_FAILURE);
}
void calloc_error()
{
fprintf(stderr, "Calloc error\n");
exit(EXIT_FAILURE);
}
void realloc_error()
{
fprintf(stderr, "Realloc error\n");
exit(EXIT_FAILURE);
}
void file_error(char *s)
{
fprintf(stderr, "Couldn't open file: %s\n", s);
exit(EXIT_FAILURE);
}
list *split_str(char *s, char delim)
{
size_t i;
size_t len = strlen(s);
list *l = make_list();
list_insert(l, s);
for(i = 0; i < len; ++i){
if(s[i] == delim){
s[i] = '\0';
list_insert(l, &(s[i+1]));
}
}
return l;
}
void strip(char *s)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for(i = 0; i < len; ++i){
char c = s[i];
if(c==' '||c=='\t'||c=='\n'||c =='\r'||c==0x0d||c==0x0a) ++offset;
else s[i-offset] = c;
}
s[len-offset] = '\0';
}
void strip_args(char *s)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for (i = 0; i < len; ++i) {
char c = s[i];
if (c == '\t' || c == '\n' || c == '\r' || c == 0x0d || c == 0x0a) ++offset;
else s[i - offset] = c;
}
s[len - offset] = '\0';
}
void strip_char(char *s, char bad)
{
size_t i;
size_t len = strlen(s);
size_t offset = 0;
for(i = 0; i < len; ++i){
char c = s[i];
if(c==bad) ++offset;
else s[i-offset] = c;
}
s[len-offset] = '\0';
}
void free_ptrs(void **ptrs, int n)
{
int i;
for(i = 0; i < n; ++i) free(ptrs[i]);
free(ptrs);
}
char *fgetl(FILE *fp)
{
if(feof(fp)) return 0;
size_t size = 512;
char* line = (char*)xmalloc(size * sizeof(char));
if(!fgets(line, size, fp)){
free(line);
return 0;
}
size_t curr = strlen(line);
while((line[curr-1] != '\n') && !feof(fp)){
if(curr == size-1){
size *= 2;
line = (char*)xrealloc(line, size * sizeof(char));
}
size_t readsize = size-curr;
if(readsize > INT_MAX) readsize = INT_MAX-1;
fgets(&line[curr], readsize, fp);
curr = strlen(line);
}
if(curr >= 2)
if(line[curr-2] == 0x0d) line[curr-2] = 0x00;
if(curr >= 1)
if(line[curr-1] == 0x0a) line[curr-1] = 0x00;
return line;
}
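/* Usage sketch (hypothetical file name): fgetl strips the trailing
   newline and allocates each line, so the caller must free it.
       FILE *fp = fopen("labels.txt", "r");
       char *line;
       while ((line = fgetl(fp))) {
           puts(line);
           free(line);
       }
       fclose(fp);  */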
int read_int(int fd)
{
int n = 0;
int next = read(fd, &n, sizeof(int));
if(next <= 0) return -1;
return n;
}
void write_int(int fd, int n)
{
int next = write(fd, &n, sizeof(int));
if(next <= 0) error("read failed");
}
int read_all_fail(int fd, char *buffer, size_t bytes)
{
size_t n = 0;
while(n < bytes){
int next = read(fd, buffer + n, bytes-n);
if(next <= 0) return 1;
n += next;
}
return 0;
}
int write_all_fail(int fd, char *buffer, size_t bytes)
{
size_t n = 0;
while(n < bytes){
size_t next = write(fd, buffer + n, bytes-n);
if(next <= 0) return 1;
n += next;
}
return 0;
}
void read_all(int fd, char *buffer, size_t bytes)
{
size_t n = 0;
while(n < bytes){
int next = read(fd, buffer + n, bytes-n);
if(next <= 0) error("read failed");
n += next;
}
}
void write_all(int fd, char *buffer, size_t bytes)
{
size_t n = 0;
while(n < bytes){
size_t next = write(fd, buffer + n, bytes-n);
if(next <= 0) error("write failed");
n += next;
}
}
char *copy_string(char *s)
{
if(!s) {
return NULL;
}
char* copy = (char*)xmalloc(strlen(s) + 1);
strncpy(copy, s, strlen(s)+1);
return copy;
}
list *parse_csv_line(char *line)
{
list *l = make_list();
char *c, *p;
int in = 0;
for(c = line, p = line; *c != '\0'; ++c){
if(*c == '"') in = !in;
else if(*c == ',' && !in){
*c = '\0';
list_insert(l, copy_string(p));
p = c+1;
}
}
list_insert(l, copy_string(p));
return l;
}
int count_fields(char *line)
{
int count = 0;
int done = 0;
char *c;
for(c = line; !done; ++c){
done = (*c == '\0');
if(*c == ',' || done) ++count;
}
return count;
}
float *parse_fields(char *line, int n)
{
float* field = (float*)xcalloc(n, sizeof(float));
char *c, *p, *end;
int count = 0;
int done = 0;
for(c = line, p = line; !done; ++c){
done = (*c == '\0');
if(*c == ',' || done){
*c = '\0';
field[count] = strtod(p, &end);
if(p == c) field[count] = nan("");
if(end != c && (end != c-1 || *end != '\r')) field[count] = nan(""); //DOS file formats!
p = c+1;
++count;
}
}
return field;
}
float sum_array(float *a, int n)
{
int i;
float sum = 0;
for(i = 0; i < n; ++i) sum += a[i];
return sum;
}
float mean_array(float *a, int n)
{
return sum_array(a,n)/n;
}
void mean_arrays(float **a, int n, int els, float *avg)
{
int i;
int j;
memset(avg, 0, els*sizeof(float));
for(j = 0; j < n; ++j){
#pragma omp parallel for
for(i = 0; i < els; ++i){
avg[i] += a[j][i];
}
}
#pragma omp parallel for
for(i = 0; i < els; ++i){
avg[i] /= n;
}
}
void print_statistics(float *a, int n)
{
float m = mean_array(a, n);
float v = variance_array(a, n);
printf("MSE: %.6f, Mean: %.6f, Variance: %.6f\n", mse_array(a, n), m, v);
}
float variance_array(float *a, int n)
{
int i;
float sum = 0;
float mean = mean_array(a, n);
for(i = 0; i < n; ++i) sum += (a[i] - mean)*(a[i]-mean);
float variance = sum/n;
return variance;
}
int constrain_int(int a, int min, int max)
{
if (a < min) return min;
if (a > max) return max;
return a;
}
float constrain(float min, float max, float a)
{
if (a < min) return min;
if (a > max) return max;
return a;
}
float dist_array(float *a, float *b, int n, int sub)
{
int i;
float sum = 0;
for(i = 0; i < n; i += sub) sum += pow(a[i]-b[i], 2);
return sqrt(sum);
}
float mse_array(float *a, int n)
{
int i;
float sum = 0;
for(i = 0; i < n; ++i) sum += a[i]*a[i];
return sqrt(sum/n);
}
void normalize_array(float *a, int n)
{
int i;
float mu = mean_array(a,n);
float sigma = sqrt(variance_array(a,n));
for(i = 0; i < n; ++i){
a[i] = (a[i] - mu)/sigma;
}
mu = mean_array(a,n);
sigma = sqrt(variance_array(a,n));
}
void translate_array(float *a, int n, float s)
{
int i;
for(i = 0; i < n; ++i){
a[i] += s;
}
}
float mag_array(float *a, int n)
{
int i;
float sum = 0;
for(i = 0; i < n; ++i){
sum += a[i]*a[i];
}
return sqrt(sum);
}
// indices_to_skip is a bit array: entries equal to 1 are skipped
float mag_array_skip(float *a, int n, int * indices_to_skip)
{
int i;
float sum = 0;
for (i = 0; i < n; ++i) {
if (indices_to_skip[i] != 1) {
sum += a[i] * a[i];
}
}
return sqrt(sum);
}
void scale_array(float *a, int n, float s)
{
int i;
for(i = 0; i < n; ++i){
a[i] *= s;
}
}
int sample_array(float *a, int n)
{
float sum = sum_array(a, n);
scale_array(a, n, 1. / sum);
float r = rand_uniform(0, 1);
int i;
for (i = 0; i < n; ++i) {
r = r - a[i];
if (r <= 0) return i;
}
return n - 1;
}
int sample_array_custom(float *a, int n)
{
float sum = sum_array(a, n);
scale_array(a, n, 1./sum);
float r = rand_uniform(0, 1);
int start_index = rand_int(0, 0);
int i;
for(i = 0; i < n; ++i){
r = r - a[(i + start_index) % n];
if (r <= 0) return i;
}
return n-1;
}
int max_index(float *a, int n)
{
if(n <= 0) return -1;
int i, max_i = 0;
float max = a[0];
for(i = 1; i < n; ++i){
if(a[i] > max){
max = a[i];
max_i = i;
}
}
return max_i;
}
int top_max_index(float *a, int n, int k)
{
if (n <= 0) return -1;
float *values = (float*)xcalloc(k, sizeof(float));
int *indexes = (int*)xcalloc(k, sizeof(int));
int i, j;
for (i = 0; i < n; ++i) {
for (j = 0; j < k; ++j) {
if (a[i] > values[j]) {
values[j] = a[i];
indexes[j] = i;
break;
}
}
}
int count = 0;
for (j = 0; j < k; ++j) if (values[j] > 0) count++;
int get_index = rand_int(0, count-1);
int val = indexes[get_index];
free(indexes);
free(values);
return val;
}
int int_index(int *a, int val, int n)
{
int i;
for (i = 0; i < n; ++i) {
if (a[i] == val) return i;
}
return -1;
}
int rand_int(int min, int max)
{
if (max < min){
int s = min;
min = max;
max = s;
}
int r = (random_gen()%(max - min + 1)) + min;
return r;
}
// From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
float rand_normal()
{
static int haveSpare = 0;
static double rand1, rand2;
if(haveSpare)
{
haveSpare = 0;
return sqrt(rand1) * sin(rand2);
}
haveSpare = 1;
rand1 = random_gen() / ((double) RAND_MAX);
if(rand1 < 1e-100) rand1 = 1e-100;
rand1 = -2 * log(rand1);
rand2 = (random_gen() / ((double)RAND_MAX)) * 2.0 * M_PI;
return sqrt(rand1) * cos(rand2);
}
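/* Usage sketch: rand_normal() samples the standard normal N(0, 1);
   scale and shift for other parameters, e.g.
       float v = mu + sigma * rand_normal();   // N(mu, sigma^2)  */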
/*
float rand_normal()
{
int n = 12;
int i;
float sum= 0;
for(i = 0; i < n; ++i) sum += (float)random_gen()/RAND_MAX;
return sum-n/2.;
}
*/
size_t rand_size_t()
{
return ((size_t)(random_gen()&0xff) << 56) |
((size_t)(random_gen()&0xff) << 48) |
((size_t)(random_gen()&0xff) << 40) |
((size_t)(random_gen()&0xff) << 32) |
((size_t)(random_gen()&0xff) << 24) |
((size_t)(random_gen()&0xff) << 16) |
((size_t)(random_gen()&0xff) << 8) |
((size_t)(random_gen()&0xff) << 0);
}
float rand_uniform(float min, float max)
{
if(max < min){
float swap = min;
min = max;
max = swap;
}
#if (RAND_MAX < 65536)
int rnd = rand()*(RAND_MAX + 1) + rand();
return ((float)rnd / (RAND_MAX*RAND_MAX) * (max - min)) + min;
#else
return ((float)rand() / RAND_MAX * (max - min)) + min;
#endif
//return (random_float() * (max - min)) + min;
}
float rand_scale(float s)
{
float scale = rand_uniform_strong(1, s);
if(random_gen()%2) return scale;
return 1./scale;
}
float **one_hot_encode(float *a, int n, int k)
{
int i;
float** t = (float**)xcalloc(n, sizeof(float*));
for(i = 0; i < n; ++i){
t[i] = (float*)xcalloc(k, sizeof(float));
int index = (int)a[i];
t[i][index] = 1;
}
return t;
}
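/* Usage sketch: one_hot_encode((float[]){2, 0}, 2, 3) returns
   {{0, 0, 1}, {1, 0, 0}}; each row and the row array are allocated,
   so free every t[i] and then t.  */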
static unsigned int x = 123456789, y = 362436069, z = 521288629;
// Marsaglia's xorshf96 generator: period 2^96-1
unsigned int random_gen_fast(void)
{
unsigned int t;
x ^= x << 16;
x ^= x >> 5;
x ^= x << 1;
t = x;
x = y;
y = z;
z = t ^ x ^ y;
return z;
}
float random_float_fast()
{
return ((float)random_gen_fast() / (float)UINT_MAX);
}
int rand_int_fast(int min, int max)
{
if (max < min) {
int s = min;
min = max;
max = s;
}
int r = (random_gen_fast() % (max - min + 1)) + min;
return r;
}
unsigned int random_gen()
{
unsigned int rnd = 0;
#ifdef WIN32
rand_s(&rnd);
#else // WIN32
rnd = rand();
#if (RAND_MAX < 65536)
rnd = rand()*(RAND_MAX + 1) + rnd;
#endif //(RAND_MAX < 65536)
#endif // WIN32
return rnd;
}
float random_float()
{
unsigned int rnd = 0;
#ifdef WIN32
rand_s(&rnd);
return ((float)rnd / (float)UINT_MAX);
#else // WIN32
rnd = rand();
#if (RAND_MAX < 65536)
rnd = rand()*(RAND_MAX + 1) + rnd;
return((float)rnd / (float)(RAND_MAX*RAND_MAX));
#endif //(RAND_MAX < 65536)
return ((float)rnd / (float)RAND_MAX);
#endif // WIN32
}
float rand_uniform_strong(float min, float max)
{
if (max < min) {
float swap = min;
min = max;
max = swap;
}
return (random_float() * (max - min)) + min;
}
float rand_precalc_random(float min, float max, float random_part)
{
if (max < min) {
float swap = min;
min = max;
max = swap;
}
return (random_part * (max - min)) + min;
}
#define RS_SCALE (1.0 / (1.0 + RAND_MAX))
double double_rand(void)
{
double d;
do {
d = (((rand() * RS_SCALE) + rand()) * RS_SCALE + rand()) * RS_SCALE;
    } while (d >= 1); // reject the rare case where rounding pushes d up to 1.0
return d;
}
unsigned int uint_rand(unsigned int less_than)
{
return (unsigned int)((less_than)* double_rand());
}
int check_array_is_nan(float *arr, int size)
{
int i;
for (i = 0; i < size; ++i) {
if (isnan(arr[i])) return 1;
}
return 0;
}
int check_array_is_inf(float *arr, int size)
{
int i;
for (i = 0; i < size; ++i) {
if (isinf(arr[i])) return 1;
}
return 0;
}
int *random_index_order(int min, int max)
{
int *inds = (int *)xcalloc(max - min, sizeof(int));
int i;
for (i = min; i < max; ++i) {
inds[i - min] = i;
}
for (i = min; i < max - 1; ++i) {
int swap = inds[i - min];
int index = i + rand() % (max - i);
inds[i - min] = inds[index - min];
inds[index - min] = swap;
}
return inds;
}
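/* Usage sketch: random_index_order(0, 5) returns a heap-allocated
   random permutation of {0, 1, 2, 3, 4}; the caller frees it.  */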
int max_int_index(int *a, int n)
{
if (n <= 0) return -1;
int i, max_i = 0;
int max = a[0];
for (i = 1; i < n; ++i) {
if (a[i] > max) {
max = a[i];
max_i = i;
}
}
return max_i;
}
// Absolute box from relative coordinate bounding box and image size
boxabs box_to_boxabs(const box* b, const int img_w, const int img_h, const int bounds_check)
{
boxabs ba;
ba.left = (b->x - b->w / 2.)*img_w;
ba.right = (b->x + b->w / 2.)*img_w;
ba.top = (b->y - b->h / 2.)*img_h;
ba.bot = (b->y + b->h / 2.)*img_h;
if (bounds_check) {
if (ba.left < 0) ba.left = 0;
if (ba.right > img_w - 1) ba.right = img_w - 1;
if (ba.top < 0) ba.top = 0;
if (ba.bot > img_h - 1) ba.bot = img_h - 1;
}
return ba;
}
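/* Worked example (sketch): for b = {x=0.5, y=0.5, w=0.5, h=0.5} on a
   100x100 image, the result is left=25, right=75, top=25, bot=75;
   with bounds_check set, edges are clipped to [0, img_dim - 1].  */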
int make_directory(char *path, int mode)
{
#ifdef WIN32
return _mkdir(path);
#else
return mkdir(path, mode);
#endif
}
char *get_file_name(char *filename)
{
char* file = basename(filename);
char* name = strtok(file, ".");
return name;
}
void get_timestamp(char **timestamp)
{
time_t t = time(NULL);
struct tm tm = *localtime(&t);
sprintf(*timestamp, "%d-%02d-%02d_%02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
// sprintf(*timestamp, "%d-%02d-%02d", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
}
int check_if_file_exists(char* filename)
{
return access(filename, F_OK);
} |
stream.c | /*-----------------------------------------------------------------------*/
/* Program: STREAM */
/* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <sys/time.h>
#include <sys/param.h>
/*-----------------------------------------------------------------------
* INSTRUCTIONS:
*
* 1) STREAM requires different amounts of memory to run on different
* systems, depending on both the system cache size(s) and the
* granularity of the system timer.
* You should adjust the value of 'STREAM_ARRAY_SIZE' (below)
* to meet *both* of the following criteria:
* (a) Each array must be at least 4 times the size of the
* available cache memory. I don't worry about the difference
* between 10^6 and 2^20, so in practice the minimum array size
* is about 3.8 times the cache size.
* Example 1: One Xeon E3 with 8 MB L3 cache
* STREAM_ARRAY_SIZE should be >= 4 million, giving
* an array size of 30.5 MB and a total memory requirement
* of 91.5 MB.
* Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP)
* STREAM_ARRAY_SIZE should be >= 20 million, giving
* an array size of 153 MB and a total memory requirement
* of 458 MB.
* (b) The size should be large enough so that the 'timing calibration'
* output by the program is at least 20 clock-ticks.
* Example: most versions of Windows have a 10 millisecond timer
* granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds.
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec.
 *       This means that each array must be at least 1 GB, or 128M elements.
*
* Version 5.10 increases the default array size from 2 million
* elements to 10 million elements in response to the increasing
* size of L3 caches. The new default size is large enough for caches
* up to 20 MB.
* Version 5.10 changes the loop index variables from "register int"
* to "ssize_t", which allows array indices >2^32 (4 billion)
* on properly configured 64-bit systems. Additional compiler options
* (such as "-mcmodel=medium") may be required for large memory runs.
*
* Array size can be set at compile time without modifying the source
* code for the (many) compilers that support preprocessor definitions
* on the compile line. E.g.,
* gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M
* will override the default size of 10M with a new size of 100M elements
* per array.
*/
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 10000000
#endif
/* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result
* for any iteration after the first, therefore the minimum value
* for NTIMES is 2.
* There are no rules on maximum allowable values for NTIMES, but
* values larger than the default are unlikely to noticeably
* increase the reported performance.
* NTIMES can also be set on the compile line without changing the source
* code using, for example, "-DNTIMES=7".
*/
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 10
#endif
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
/* Users are allowed to modify the "OFFSET" variable, which *may* change the
* relative alignment of the arrays (though compilers may change the
* effective offset by making the arrays non-contiguous on some systems).
* Use of non-zero values for OFFSET can be especially helpful if the
* STREAM_ARRAY_SIZE is set to a value close to a large power of 2.
* OFFSET can also be set on the compile line without changing the source
* code using, for example, "-DOFFSET=56".
*/
#ifndef OFFSET
# define OFFSET 0
#endif
/*
* 3) Compile the code with optimization. Many compilers generate
* unreasonably bad code before the optimizer tightens things up.
* If the results are unreasonably good, on the other hand, the
* optimizer might be too smart for me!
*
* For a simple single-core version, try compiling with:
* cc -O stream.c -o stream
* This is known to work on many, many systems....
*
* To use multiple cores, you need to tell the compiler to obey the OpenMP
* directives in the code. This varies by compiler, but a common example is
* gcc -O -fopenmp stream.c -o stream_omp
* The environment variable OMP_NUM_THREADS allows runtime control of the
* number of threads/cores used when the resulting "stream_omp" program
* is executed.
*
* To run with single-precision variables and arithmetic, simply add
* -DSTREAM_TYPE=float
* to the compile line.
* Note that this changes the minimum array sizes required --- see (1) above.
*
* The preprocessor directive "TUNED" does not do much -- it simply causes the
* code to call separate functions to execute each kernel. Trivial versions
* of these functions are provided, but they are *not* tuned -- they just
* provide predefined interfaces to be replaced with tuned code.
*
*
* 4) Optional: Mail the results to mccalpin@cs.virginia.edu
* Be sure to include info that will help me understand:
* a) the computer hardware configuration (e.g., processor model, memory type)
* b) the compiler name/version and compilation flags
* c) any run-time information (such as OMP_NUM_THREADS)
* d) all of the output from the test case.
*
* Thanks!
*
*-----------------------------------------------------------------------*/
# define HLINE "-------------------------------------------------------------\n"
#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif
static STREAM_TYPE a[STREAM_ARRAY_SIZE + OFFSET],
b[STREAM_ARRAY_SIZE + OFFSET],
c[STREAM_ARRAY_SIZE + OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main() {
int quantum, checktick();
int BytesPerWord;
int k;
ssize_t j;
STREAM_TYPE scalar;
double t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.10 $\n");
printf(HLINE);
BytesPerWord = sizeof(STREAM_TYPE);
printf("This system uses %d bytes per array element.\n",
BytesPerWord);
printf(HLINE);
#ifdef N
printf("***** WARNING: ******\n");
printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
printf("***** WARNING: ******\n");
#endif
printf("Array size = %llu (elements), Offset = %d (elements)\n", (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
BytesPerWord * ((double) STREAM_ARRAY_SIZE / 1024.0 / 1024.0),
BytesPerWord * ((double) STREAM_ARRAY_SIZE / 1024.0 / 1024.0 / 1024.0));
printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
(3.0 * BytesPerWord) * ((double) STREAM_ARRAY_SIZE / 1024.0 / 1024.),
(3.0 * BytesPerWord) * ((double) STREAM_ARRAY_SIZE / 1024.0 / 1024. / 1024.));
printf("Each kernel will be executed %d times.\n", NTIMES);
printf(" The *best* time for each kernel (excluding the first iteration)\n");
printf(" will be used to compute the reported bandwidth.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
#ifdef _OPENMP
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf ("Number of Threads counted = %i\n",k);
#endif
/* Initialize the arrays (in parallel, so each page is first touched by the thread that will later use it). */
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ((quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t);
printf(" (= %d clock ticks)\n", (int) (t / quantum));
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k = 0; k < NTIMES; k++) {
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
b[j] = scalar * c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
c[j] = a[j] + b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j = 0; j < STREAM_ARRAY_SIZE; j++)
a[j] = b[j] + scalar * c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k = 1; k < NTIMES; k++) /* note -- skip first iteration */
{
for (j = 0; j < 4; j++) {
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Best Rate MB/s Avg time Min time Max time\n");
for (j = 0; j < 4; j++) {
avgtime[j] = avgtime[j] / (double) (NTIMES - 1);
printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
1.0E-06 * bytes[j] / mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
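/* M is the number of consecutive clock samples collected below to
   estimate the timer granularity. */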
int
checktick() {
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while (((t2 = mysecond()) - t1) < 1.0E-6);
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int) (1.0E6 * (timesfound[i] - timesfound[i - 1]));
minDelta = MIN(minDelta, MAX(Delta, 0));
}
return (minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
void checkSTREAMresults() {
STREAM_TYPE aj, bj, cj, scalar;
STREAM_TYPE aSumErr, bSumErr, cSumErr;
STREAM_TYPE aAvgErr, bAvgErr, cAvgErr;
double epsilon;
ssize_t j;
int k, ierr, err;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k = 0; k < NTIMES; k++) {
cj = aj;
bj = scalar * cj;
cj = aj + bj;
aj = bj + scalar * cj;
}
/* accumulate deltas between observed and expected results */
aSumErr = 0.0;
bSumErr = 0.0;
cSumErr = 0.0;
for (j = 0; j < STREAM_ARRAY_SIZE; j++) {
aSumErr += abs(a[j] - aj);
bSumErr += abs(b[j] - bj);
cSumErr += abs(c[j] - cj);
// if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
}
aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
if (sizeof(STREAM_TYPE) == 4) {
epsilon = 1.e-6;
} else if (sizeof(STREAM_TYPE) == 8) {
epsilon = 1.e-13;
} else {
printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n", sizeof(STREAM_TYPE));
epsilon = 1.e-6;
}
err = 0;
if (abs(aAvgErr / aj) > epsilon) {
err++;
printf("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n", epsilon);
printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", aj, aAvgErr, abs(aAvgErr) / aj);
ierr = 0;
for (j = 0; j < STREAM_ARRAY_SIZE; j++) {
if (abs(a[j] / aj - 1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,aj,a[j],abs((aj-a[j])/aAvgErr));
}
#endif
}
}
printf(" For array a[], %d errors were found.\n", ierr);
}
if (abs(bAvgErr / bj) > epsilon) {
err++;
printf("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n", epsilon);
printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", bj, bAvgErr, abs(bAvgErr) / bj);
printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon);
ierr = 0;
for (j = 0; j < STREAM_ARRAY_SIZE; j++) {
if (abs(b[j] / bj - 1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,bj,b[j],abs((bj-b[j])/bAvgErr));
}
#endif
}
}
printf(" For array b[], %d errors were found.\n", ierr);
}
if (abs(cAvgErr / cj) > epsilon) {
err++;
printf("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n", epsilon);
printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", cj, cAvgErr, abs(cAvgErr) / cj);
printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon);
ierr = 0;
for (j = 0; j < STREAM_ARRAY_SIZE; j++) {
if (abs(c[j] / cj - 1.0) > epsilon) {
ierr++;
#ifdef VERBOSE
if (ierr < 10) {
printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
j,cj,c[j],abs((cj-c[j])/cAvgErr));
}
#endif
}
}
printf(" For array c[], %d errors were found.\n", ierr);
}
if (err == 0) {
printf("Solution Validates: avg error less than %e on all three arrays\n", epsilon);
}
#ifdef VERBOSE
printf ("Results Validation Verbose Results: \n");
printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}
#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
ssize_t j;
#pragma omp parallel for
for (j=0; j<STREAM_ARRAY_SIZE; j++)
a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif |
GB_binop__ge_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_uint16
// A.*B function (eWiseMult): GB_AemultB__ge_uint16
// A*D function (colscale): GB_AxD__ge_uint16
// D*A function (rowscale): GB_DxB__ge_uint16
// C+=B function (dense accum): GB_Cdense_accumB__ge_uint16
// C+=b function (dense accum): GB_Cdense_accumb__ge_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_uint16
// C=scalar+B GB_bind1st__ge_uint16
// C=scalar+B' GB_bind1st_tran__ge_uint16
// C=A+scalar GB_bind2nd__ge_uint16
// C=A'+scalar GB_bind2nd_tran__ge_uint16
// C type: bool
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT16 || GxB_NO_GE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__ge_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__ge_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
BitmapPrimitiveShape.h | #ifndef BITMAPPRIMITIVESHAPE_HEADER
#define BITMAPPRIMITIVESHAPE_HEADER
#include "BasePrimitiveShape.h"
#include <GfxTL/AABox.h>
#include <MiscLib/Vector.h>
#include <algorithm>
#include <istream>
#include <MiscLib/Performance.h>
#include <GfxTL/MathHelper.h>
#include <GfxTL/IndexedIterator.h>
#include "IndexIterator.h"
#include <MiscLib/Pair.h>
#ifdef DOPARALLEL
#include <omp.h>
#endif
#ifndef DLL_LINKAGE
#define DLL_LINKAGE
#endif
struct BitmapInfo
{
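// params:  per-point (u, v) parameters on the shape's surface
// bitmap:  occupancy grid over the parameter-space bounding box bbox
// bmpIdx:  flat bitmap cell index of each point
// uextent/vextent: grid resolution in u and v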
MiscLib::Vector< std::pair< float, float > > params;
MiscLib::Vector< char > bitmap;
GfxTL::AABox< GfxTL::Vector2Df > bbox;
MiscLib::Vector< size_t > bmpIdx;
size_t uextent, vextent;
};
class DLL_LINKAGE BitmapPrimitiveShape
: public BasePrimitiveShape
{
public:
bool Init(bool binary, std::istream *i);
size_t ConnectedComponent(const PointCloud &pc, float epsilon,
MiscLib::Vector< size_t > *indices, bool doFiltering = true, float* borderRatio = 0 );
size_t AllConnectedComponents(const PointCloud &pc, float epsilon, BitmapInfo& bitmapInfo,
MiscLib::Vector< size_t > *indices, MiscLib::Vector< int >& componentsImg,
MiscLib::Vector< std::pair< int, size_t > >& labels, bool doFiltering = true );
void TrimmingPolygons(const PointCloud &pc, float epsilon,
size_t begin, size_t end,
std::deque< ComponentPolygons > *polys) const;
void GenerateBitmapPoints(const PointCloud &pc, float epsilon,
size_t begin, size_t end, PointCloud *bmpPc) const;
public:
virtual void Parameters(const Vec3f &p,
std::pair< float, float > *param) const = 0;
virtual bool InSpace(float u, float v, Vec3f *p, Vec3f *n) const = 0;
virtual void Parameters(GfxTL::IndexedIterator< MiscLib::Vector< size_t >::iterator,
PointCloud::const_iterator > begin,
GfxTL::IndexedIterator< MiscLib::Vector< size_t >::iterator,
PointCloud::const_iterator > end,
MiscLib::Vector< std::pair< float, float > > *bmpParams) const = 0;
virtual void Parameters(GfxTL::IndexedIterator< IndexIterator,
PointCloud::const_iterator > begin,
GfxTL::IndexedIterator< IndexIterator,
PointCloud::const_iterator > end,
MiscLib::Vector< std::pair< float, float > > *bmpParams) const = 0;
virtual void BitmapExtent(float epsilon,
GfxTL::AABox< GfxTL::Vector2Df > *bbox,
MiscLib::Vector< std::pair< float, float > > *params,
size_t *uextent, size_t *vextent) = 0;
virtual void InBitmap(const std::pair< float, float > ¶m,
float epsilon, const GfxTL::AABox< GfxTL::Vector2Df > &bbox,
size_t uextent, size_t vextent,
std::pair< int, int > *inBmp) const = 0;
virtual void PreWrapBitmap(const GfxTL::AABox< GfxTL::Vector2Df > &bbox,
float epsilon, size_t uextent, size_t vextent,
MiscLib::Vector< char > *bmp) const;
virtual void WrapBitmap(const GfxTL::AABox< GfxTL::Vector2Df > &bbox,
float epsilon, bool *uwrap, bool *vwrap) const = 0;
virtual void WrapComponents(const GfxTL::AABox< GfxTL::Vector2Df > &bbox,
float epsilon, size_t uextent, size_t vextent,
MiscLib::Vector< int > *componentImg,
MiscLib::Vector< std::pair< int, size_t > > *labels) const;
virtual bool InSpace(size_t u, size_t v, float epsilon,
const GfxTL::AABox< GfxTL::Vector2Df > &bbox, size_t uextent,
size_t vextent, Vec3f *p, Vec3f *n) const = 0;
template< class IteratorT >
void BuildBitmap(const PointCloud &pc, float *epsilon, IteratorT begin,
IteratorT end, MiscLib::Vector< std::pair< float, float > > *params,
GfxTL::AABox< GfxTL::Vector2Df > *bbox,
MiscLib::Vector< char > *bitmap, size_t *uextent, size_t *vextent,
MiscLib::Vector< size_t > *bmpIdx) const;
template< class IteratorT >
void BuildBitmap(const PointCloud &pc, float *epsilon, IteratorT begin,
IteratorT end, MiscLib::Vector< std::pair< float, float > > *params,
GfxTL::AABox< GfxTL::Vector2Df > *bbox,
MiscLib::Vector< char > *bitmap, size_t *uextent, size_t *vextent,
MiscLib::Vector< size_t > *bmpIdx, size_t border) const;
void BuildPolygons(const PointCloud &pc, float epsilon, size_t begin,
size_t end, GfxTL::AABox< GfxTL::Vector2Df > *bbox,
size_t *uextent, size_t *vextent,
std::deque< ComponentPolygons > *polys) const;
protected:
mutable GfxTL::AABox< GfxTL::Vector2Df > m_extBbox;
};
template< class IteratorT >
void BitmapPrimitiveShape::BuildBitmap(const PointCloud &pc, float *epsilon,
IteratorT begin, IteratorT end, MiscLib::Vector< std::pair< float, float > > *params,
GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< char > *bitmap,
size_t *uextent, size_t *vextent, MiscLib::Vector< size_t > *bmpIdx) const
{
size_t size = end - begin;
params->resize(size);
// compute parameters and extent
Parameters(GfxTL::IndexIterate(begin, pc.begin()),
GfxTL::IndexIterate(end, pc.begin()), params);
bbox->Min() = GfxTL::Vector2Df(std::numeric_limits< float >::infinity(),
std::numeric_limits< float >::infinity());
bbox->Max() = -bbox->Min();
for(size_t i = 0; i < size; ++i)
{
if((*params)[i].first < bbox->Min()[0])
bbox->Min()[0] = (*params)[i].first;
if((*params)[i].first > bbox->Max()[0])
bbox->Max()[0] = (*params)[i].first;
if((*params)[i].second < bbox->Min()[1])
bbox->Min()[1] = (*params)[i].second;
if((*params)[i].second > bbox->Max()[1])
bbox->Max()[1] = (*params)[i].second;
}
// bbox gives the bounding box in parameter space
// we can now set up the bitmap
const_cast< BitmapPrimitiveShape * >(this)->BitmapExtent(*epsilon, bbox, params,
uextent, vextent);
if(*uextent < 2)
*uextent = 2;
if(*vextent < 2)
*vextent = 2;
bitmap->resize((*uextent) * (*vextent));
std::fill(bitmap->begin(), bitmap->end(), false);
// set all true bits in bitmap
bmpIdx->resize(params->size());
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static)
#endif
for(int i = 0; i < static_cast<int>(size); ++i)
{
std::pair< int, int > bmpParam;
InBitmap((*params)[i], *epsilon, *bbox, *uextent, *vextent, &bmpParam);
// clamp bitmap coords
bmpParam.first = GfxTL::Math< int >::Clamp(bmpParam.first, 0, static_cast<int>(*uextent) - 1);
bmpParam.second = GfxTL::Math< int >::Clamp(bmpParam.second, 0, static_cast<int>(*vextent) - 1);
(*bitmap)[(*bmpIdx)[i] = bmpParam.first + bmpParam.second * (*uextent)] = true;
}
}
template< class IteratorT >
void BitmapPrimitiveShape::BuildBitmap(const PointCloud &pc, float *epsilon,
IteratorT begin, IteratorT end, MiscLib::Vector< std::pair< float, float > > *params,
GfxTL::AABox< GfxTL::Vector2Df > *bbox, MiscLib::Vector< char > *bitmap,
size_t *uextent, size_t *vextent, MiscLib::Vector< size_t > *bmpIdx,
size_t border) const
{
params->resize(end - begin);
// compute parameters and extent
Parameters(pc[*begin].pos, &(*params)[0]);
bbox->Min() = bbox->Max() = GfxTL::Vector2Df((*params)[0].first,
(*params)[0].second);
size_t j = 1;
IteratorT i = begin;
for(++i; i != end; ++i, ++j)
{
Parameters(pc[*i].pos, &(*params)[j]);
if(bbox->Min()[0] > (*params)[j].first)
bbox->Min()[0] = (*params)[j].first;
else if(bbox->Max()[0] < (*params)[j].first)
bbox->Max()[0] = (*params)[j].first;
if(bbox->Min()[1] > (*params)[j].second)
bbox->Min()[1] = (*params)[j].second;
else if(bbox->Max()[1] < (*params)[j].second)
bbox->Max()[1] = (*params)[j].second;
}
// bbox gives the bounding box in parameter space
// we can now set up the bitmap
const_cast< BitmapPrimitiveShape * >(this)->BitmapExtent(*epsilon, bbox, params,
uextent, vextent);
if(*uextent < 2)
*uextent = 2;
if(*vextent < 2)
*vextent = 2;
bitmap->resize(((*uextent) + 2 * border) * ((*vextent) + 2 * border));
std::fill(bitmap->begin(), bitmap->end(), false);
// set all true bits in bitmap
bmpIdx->resize(params->size());
size_t lineWidth = (*uextent) + 2 * border;
for(size_t i = 0; i < params->size(); ++i)
{
std::pair< int, int > bmpParam;
InBitmap((*params)[i], *epsilon, *bbox, *uextent, *vextent, &bmpParam);
// clamp bitmap coords
bmpParam.first = GfxTL::Math< int >::Clamp(bmpParam.first, 0, static_cast<int>(*uextent) - 1);
bmpParam.second = GfxTL::Math< int >::Clamp(bmpParam.second, 0, static_cast<int>(*vextent) - 1);
(*bitmap)[(*bmpIdx)[i] = bmpParam.first + border
+ (bmpParam.second + border) * lineWidth] = true;
}
}
#endif
|
minhash.h | #ifndef MH_H_
#define MH_H_
#pragma once
#include <emmintrin.h>
#include <smmintrin.h>
#include "../seq/types.h"
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#if defined(_OPENMP)
#include <omp.h>
#endif
typedef uint64 hash_size_t;
struct minhash_fp_t {
std::vector<hash_size_t> v;
};
struct minhash_rand_range_generator_t {
int rand_in_range(int n) {
int r, rand_max = RAND_MAX - (RAND_MAX % n);
while ((r = rand()) >= rand_max);
return r / (rand_max / n);
}
};
typedef uint64 proj_hash_t;
struct bucket_entry {
int sample_id;
proj_hash_t hash;
};
struct sample_support_t {
int id;
int support;
sample_support_t(int _id, int _support): id(_id), support(_support) {};
};
inline bool sort_by_support(const sample_support_t& lhs, const sample_support_t& rhs) {
return lhs.support > rhs.support;
}
class minhash_t {
public:
int k;
int FP_len; // sample fingerprint length
int B;
// kmer hashing, multiply-shift hash functions (Dietzfelbinger et al)
std::vector<hash_size_t> fp_hash_funcs;
std::vector<std::vector<std::vector<bucket_entry>>> tables; // buckets of sample ids
minhash_t(): minhash_t(0, 0) {}
minhash_t(const int kmer_len, const int fp_len) {
B = 131072;
k = kmer_len;
FP_len = fp_len;
init_hash_funcs();
}
void init_hash_funcs() {
fp_hash_funcs.resize(FP_len);
for(int i = 0; i < FP_len; i++) {
hash_size_t a = 2ULL*rand() + 1ULL; // odd multipliers
fp_hash_funcs[i] = a;
}
}
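// MinHash property (background note): each fingerprint entry stores the
// minimum multiply-shift hash over all k-mers of a sample, so the expected
// fraction of equal entries between two fingerprints approximates the
// Jaccard similarity of their k-mer sets; the bucket lookup below exploits this.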
void init_index_tables() {
tables.resize(FP_len);
for(int i = 0; i < FP_len; i++) {
tables[i].resize(B);
}
}
bool compute_fp(const std::vector<kmer_2bit_t>& keys, minhash_fp_t& fp) {
if(keys.size() == 0) {
std::cout << "Empty key set!\n";
return false;
}
fp.v.resize(FP_len);
#pragma omp parallel for
for(int h = 0; h < FP_len; h++) {
const hash_size_t s = fp_hash_funcs[h];
fp.v[h] = s*keys[0];
for(uint64 i = 1; i < keys.size(); i++) {
const hash_size_t p = keys[i]*s;
if(p < fp.v[h]) {
fp.v[h] = p;
}
}
}
return true;
}
// insert the sample into the appropriate buckets based on its fingerprint projections
void insert(const minhash_fp_t& fp, const int sample_id) {
#pragma omp parallel for
for(int t = 0; t < FP_len; t++) { // for each hash table
const proj_hash_t key = fp.v[t];
const int bucket_id = key % B;
bucket_entry e;
e.sample_id = sample_id;
e.hash = key;
tables[t][bucket_id].push_back(e);
}
}
// lookup the sample ids in all the buckets given by the fingerprint
void lookup(const minhash_fp_t& query_fp, std::vector<sample_support_t>& id_counts) {
std::vector<int> sample_ids;
for(int t = 0; t < FP_len; t++) { // for each hash table
const proj_hash_t key = query_fp.v[t];
const int bucket_id = key % B;
for(unsigned int i = 0; i < tables[t][bucket_id].size(); i++) {
bucket_entry e = tables[t][bucket_id][i];
if(e.hash != key) continue;
sample_ids.push_back(e.sample_id);
}
}
std::sort(sample_ids.begin(), sample_ids.end());
if(sample_ids.size() == 0) return;
int c = 1;
for(uint64 i = 1; i < sample_ids.size(); i++) {
if(sample_ids[i] == sample_ids[i-1]) {
c++;
} else {
id_counts.push_back(sample_support_t(sample_ids[i-1], c));
c = 1;
}
}
id_counts.push_back(sample_support_t(sample_ids[sample_ids.size()-1], c));
std::sort(id_counts.begin(), id_counts.end(), sort_by_support);
//std::vector<int>::iterator it = std::unique(sample_ids.begin(), sample_ids.end());
//sample_ids.erase(it, sample_ids.end());
}
static void save_fp_to_file(std::string& fname, const int k, const minhash_fp_t& fp) {
fname += std::string(".k");
fname += std::to_string(k);
fname += std::string(".L");
fname += std::to_string(fp.v.size());
fname += std::string(".gaf");
std::ofstream file;
file.open(fname.c_str(), std::ios::out | std::ios::binary);
file.write(reinterpret_cast<const char*>(&(fp.v[0])), fp.v.size()*sizeof(hash_size_t));
file.close();
}
static void load_fp_from_file(const std::string& fname, const int fp_len, minhash_fp_t& fp) {
fp.v.resize(fp_len);
std::ifstream file;
file.open(fname.c_str(), std::ios::in | std::ios::binary);
file.read(reinterpret_cast<char*>(&(fp.v[0])), fp_len*sizeof(hash_size_t));
file.close();
}
static int compare_fps(const minhash_fp_t& fp1, const minhash_fp_t& fp2) {
int count = 0;
for(unsigned int i = 0; i < fp1.v.size(); i++) {
if(fp1.v[i] == fp2.v[i]) {
count++;
}
}
return count;
}
void save_index_to_file(std::string& fname) {
fname += std::string(".k");
fname += std::to_string(k);
fname += std::string(".L");
fname += std::to_string(FP_len);
fname += std::string(".gaf.idx");
std::ofstream file;
file.open(fname.c_str(), std::ios::out | std::ios::binary);
for(int i = 0; i < FP_len; i++) {
for(int j = 0; j < B; j++) {
int table_size = tables[i][j].size();
file.write(reinterpret_cast<char*>(&table_size), sizeof(table_size));
file.write(reinterpret_cast<const char*>(&(tables[i][j][0])), table_size*sizeof(bucket_entry));
}
}
file.close();
}
void load_index_from_file(const std::string& fname) {
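// precondition: init_index_tables() must have been called, so that
// 'tables' already holds FP_len x B (empty) buckets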
std::ifstream file;
file.open(fname.c_str(), std::ios::in | std::ios::binary);
for(int i = 0; i < FP_len; i++) {
for(int j = 0; j < B; j++) {
int table_size;
file.read(reinterpret_cast<char*>(&table_size), sizeof(table_size));
tables[i][j].resize(table_size);
file.read(reinterpret_cast<char*>(&(tables[i][j][0])), table_size*sizeof(bucket_entry));
}
}
}
};
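// A minimal usage sketch (an assumption, not part of the library API: the key
// values below are illustrative only, and real kmer_2bit_t keys would come
// from the k-mer extraction code in ../seq). Compile with
// -DMINHASH_USAGE_EXAMPLE to build this self-contained demo.
#ifdef MINHASH_USAGE_EXAMPLE
inline void minhash_usage_example() {
	minhash_t mh(16 /*k*/, 128 /*fingerprint length*/);
	mh.init_index_tables();
	std::vector<kmer_2bit_t> keys = {1234567ULL, 89101112ULL, 13141516ULL};
	minhash_fp_t fp;
	if (!mh.compute_fp(keys, fp)) return;
	mh.insert(fp, /*sample_id=*/0);
	std::vector<sample_support_t> hits;
	mh.lookup(fp, hits);
	if (!hits.empty()) {
		std::cout << "best match: sample " << hits[0].id
			<< " (support " << hits[0].support << ")\n";
	}
}
#endif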
#endif
|
fFDMEX.c | #include "mex.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef MAXCORES
#define MAXCORES 1
#endif
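/* Example build and call (an assumption -- adapt flags to your toolchain):
 *   mex CFLAGS="$CFLAGS -fopenmp -DMAXCORES=8" LDFLAGS="$LDFLAGS -fopenmp" fFDMEX.c
 * From MATLAB: FD = fFDMEX(I, dir); computes forward finite differences of
 * the (possibly complex) array I along dimension dir (1..4). */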
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
/* Declare variables */
mwSize nD, NP=1, NV=1, NS=1, NT=1, RT=1;
const mwSize *sz;
mxClassID precision;
mxArray *FD;
mwSize np, nv, ns, nt, ind, sh, sh_rt, sh_nt, sh_ns, jump, dir;
long long rt;
double *pdir, *pIr, *pIi, *pFDr, *pFDi;
float *pdirf, *pIrf, *pIif, *pFDrf, *pFDif;
/* Get sizes and data type */
np = mxGetM(right[0]);
nv = mxGetN(right[0]);
nD = mxGetNumberOfDimensions(right[0]);
sz = mxGetDimensions(right[0]);
precision = mxGetClassID(right[0]);
/* Determine how many rows, columns, depth, time, and other dimensions */
/* This is done to parallelize the "other dimensions" */
NP = sz[0];
if (nD > 1)
NV = sz[1];
if (nD > 2)
NS = sz[2];
if (nD > 3)
NT = sz[3];
if (nD > 4) {
for (np=4; np<nD; np++){
RT *= sz[np];
}
}
/*mexPrintf("NP: %i\n",NP);
mexPrintf("NV: %i\n",NV);
mexPrintf("NS: %i\n",NS);
mexPrintf("NT: %i\n",NT);
mexPrintf("RT: %i\n",RT);*/
/* Check if complex */
/*if (!mxIsComplex(right[0]))
mexErrMsgTxt("Function requires complex data");*/
/* Create output and get input/output pointers */
if (precision == mxDOUBLE_CLASS) {
if (mxIsComplex(right[0])) {
FD = mxCreateNumericArray(nD,sz,precision,mxCOMPLEX);
pIi = mxGetPi(right[0]);
pFDi = mxGetPi(FD);
}
else {
FD = mxCreateNumericArray(nD,sz,precision,mxREAL);
}
pIr = mxGetPr(right[0]);
pFDr = mxGetPr(FD);
}
else {
if (mxIsComplex(right[0])) {
FD = mxCreateNumericArray(nD,sz,precision,mxCOMPLEX);
pIif = mxGetImagData(right[0]);
pFDif = mxGetImagData(FD);
}
else {
FD = mxCreateNumericArray(nD,sz,precision,mxREAL);
}
pIrf = mxGetData(right[0]);
pFDrf = mxGetData(FD);
}
/* Get transform direction */
if (mxGetClassID(right[1]) == mxDOUBLE_CLASS) {
pdir = mxGetData(right[1]);
dir = (mwSize) pdir[0];
}
else {
pdirf = mxGetData(right[1]);
dir = (mwSize) pdirf[0];
}
#ifdef _OPENMP
/* Set number of threads */
omp_set_num_threads(MAXCORES);
#endif
/* Compute finite differences */
if (precision == mxDOUBLE_CLASS) {
if (dir == 1) {
jump = 1;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP-1; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
pFDi[ind] = pIi[ind+jump] - pIi[ind];
}
/*ind++;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];*/
}
}
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP-1; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
}
/*ind++;
* pFDr[ind] = -pIr[ind];*/
}
}
}
}
}
}
else if (dir == 2) {
jump = NP;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV-1; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
pFDi[ind] = pIi[ind+jump] - pIi[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];
* }*/
}
}
}
}
else{
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV-1; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* }*/
}
}
}
}
}
else if (dir == 3) {
jump = NP*NV;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS-1; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
pFDi[ind] = pIi[ind+jump] - pIi[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* }
* }*/
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS-1; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* }
* }*/
}
}
}
}
else if (dir == 4) {
jump = NP*NV*NS;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT-1; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
pFDi[ind] = pIi[ind+jump] - pIi[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* }
* }
* }*/
}
}
else{
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT-1; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDr[ind] = pIr[ind+jump] - pIr[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* }
* }
* }*/
}
}
}
else
mexErrMsgTxt("Unsupported transform direction");
}
/* Single precision */
else {
if (dir == 1) {
jump = 1;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP-1; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
pFDif[ind] = pIif[ind+jump] - pIif[ind];
}
/*ind++;
* pFDrf[ind] = -pIrf[ind];
*/
}
}
}
}
}
else{
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP-1; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
}
/*ind++;
* pFDrf[ind] = -pIrf[ind];
*/
}
}
}
}
}
}
else if (dir == 2) {
jump = NP;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV-1; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
pFDif[ind] = pIif[ind+jump] - pIif[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDrf[ind] = -pIrf[ind];
* pFDif[ind] = -pIif[ind];
* }*/
}
}
}
}
else{
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV-1; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDrf[ind] = -pIrf[ind];
* }*/
}
}
}
}
}
else if (dir == 3) {
jump = NP*NV;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS-1; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
pFDif[ind] = pIif[ind+jump] - pIif[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDrf[ind] = -pIrf[ind];
* pFDif[ind] = -pIif[ind];
* }
* }*/
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS-1; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDrf[ind] = -pIrf[ind];
* }
* }*/
}
}
}
}
else if (dir == 4) {
jump = NP*NV*NS;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT-1; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
pFDif[ind] = pIif[ind+jump] - pIif[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDrf[ind] = -pIrf[ind];
* pFDif[ind] = -pIif[ind];
* }
* }
* }*/
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT-1; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pFDrf[ind] = pIrf[ind+jump] - pIrf[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDrf[ind] = -pIrf[ind];
* }
* }
* }*/
}
}
}
else
mexErrMsgTxt("Unsupported transform direction");
}
/* Output */
left[0] = FD;
}
|
matrix.c | #include "matrix.h"
#include <cblas.h>
#include <stdio.h>
#include <stdlib.h>
Matrix *create_mat(char *path) {
Matrix *m = malloc(sizeof(Matrix));
m->row_len = 0;
m->col_len = 0;
FILE *myfile = fopen(path, "r");
int ch = 0;
while (ch != EOF) {
ch = fgetc(myfile);
if (m->row_len == 0 && ch == ' ') {
m->col_len++;
}
if (ch == '\n') {
m->row_len++;
}
};
m->col_len++;
m->p_mat = calloc(m->row_len, sizeof(float *));
for (int i = 0; i < m->row_len; i++) {
m->p_mat[i] = calloc(m->col_len, sizeof(float));
}
rewind(myfile);
for (int i = 0; i < m->row_len; i++) {
for (int j = 0; j < m->col_len; j++) {
if (!fscanf(myfile, "%f", &m->p_mat[i][j])) {
break;
}
}
}
fclose(myfile);
return m;
}
void delete_mat(Matrix *mat) {
for (int i = 0; i < mat->row_len; i++) {
free(mat->p_mat[i]);
}
free(mat->p_mat);
free(mat);
}
void copy_mat(Matrix *target, Matrix *source) {
/* Free only the old payload; calling delete_mat() here would also free the
   Matrix struct itself and then refill freed memory (use-after-free). */
for (int i = 0; i < target->row_len; i++) {
free(target->p_mat[i]);
}
free(target->p_mat);
target->row_len = source->row_len;
target->col_len = source->col_len;
target->p_mat = calloc(target->row_len, sizeof(float *));
for (int i = 0; i < target->row_len; i++) {
target->p_mat[i] = calloc(target->col_len, sizeof(float));
}
for (int i = 0; i < target->row_len; i++) {
for (int j = 0; j < target->col_len; j++) {
target->p_mat[i][j] = source->p_mat[i][j];
}
}
}
Matrix *multiple_mat(Matrix *mat1, Matrix *mat2) {
Matrix *result = malloc(sizeof(Matrix));
result->row_len = mat1->row_len;
result->col_len = mat2->col_len;
result->p_mat = calloc(result->row_len, sizeof(float *));
for (int i = 0; i < result->row_len; ++i)
result->p_mat[i] = calloc(result->col_len, sizeof(float));
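/* i-k-j loop order: the innermost loop walks result->p_mat[i] and
   mat2->p_mat[k] contiguously, which is far more cache-friendly than the
   naive i-j-k order; each OpenMP thread owns a disjoint block of rows i,
   so the accumulation needs no synchronization. */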
#pragma omp parallel for
for (int i = 0; i < result->row_len; i++) {
for (int k = 0; k < mat1->col_len; k++) {
float tmp = mat1->p_mat[i][k];
for (int j = 0; j < result->col_len; j++) {
result->p_mat[i][j] += tmp * mat2->p_mat[k][j];
}
}
}
return result;
}
void write_mat(char *path, Matrix *mat) {
FILE *myfile = fopen(path, "w");
for (int i = 0; i < mat->row_len; i++) {
for (int j = 0; j < mat->col_len; j++) {
if (j == mat->col_len - 1) {
fprintf(myfile, "%f\n", mat->p_mat[i][j]);
} else {
fprintf(myfile, "%f ", mat->p_mat[i][j]);
}
}
}
}
void display_mat(Matrix *m1) {
for (int i = 0; i < m1->row_len; i++) {
for (int j = 0; j < m1->col_len; j++) {
if (j == m1->col_len - 1) {
printf("%f\n", m1->p_mat[i][j]);
} else {
printf("%f ", m1->p_mat[i][j]);
}
}
}
}
Matrix *calc_with_OpenBLAS(Matrix *m1, Matrix *m2) {
Matrix *result = malloc(sizeof(Matrix));
result->row_len = m1->row_len;
result->col_len = m2->col_len;
result->p_mat = calloc(result->row_len, sizeof(float *));
for (int i = 0; i < result->row_len; ++i)
result->p_mat[i] = calloc(result->col_len, sizeof(float));
const int M = m1->row_len;
const int N = m2->col_len;
const int K = m1->col_len;
int lda = K;
int ldb = N;
int ldc = N;
float *A = calloc(M * K, sizeof(float));
float *B = calloc(K * N, sizeof(float));
float *C = calloc(M * N, sizeof(float));
int k = 0;
for (int i = 0; i < M; i++) {
for (int j = 0; j < K; j++) {
A[k] = m1->p_mat[i][j];
k += 1;
}
}
k = 0;
for (int i = 0; i < K; i++) {
for (int j = 0; j < N; j++) {
B[k] = m2->p_mat[i][j];
k += 1;
}
}
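/* cblas_sgemm computes C := alpha*A*B + beta*C in row-major layout here;
   with alpha = 1 and beta = 1 on the freshly calloc'ed (all-zero) C this
   is equivalent to C = A*B. */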
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, A, lda,
B, ldb, 1.0f, C, ldc);
k = 0;
for (int i = 0; i < result->row_len; i++) {
for (int j = 0; j < result->col_len; j++) {
result->p_mat[i][j] = C[k];
k++;
}
}
free(A);
free(B);
free(C);
return result;
} |
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
 * \file broadcast_reduce-inl.h
 * \brief Function definitions of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stridej, const Shape<ndim>& stridek, int* j, int* k) {
*j = 0;
*k = 0;
#pragma unroll
for (int i = ndim-1, idx_t = idx; i >=0; --i) {
const int tmp = idx_t / shape[i];
const int coord = idx_t - tmp*shape[i];
*j += coord*stridej[i];
*k += coord*stridek[i];
idx_t = tmp;
}
}
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
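// Example: for shape (2,3,4), unravel(17, shape) yields coordinates (1,1,1),
// since 17 = 1*12 + 1*4 + 1 in row-major order.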
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > 1) * coord[i];
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims,
Shape<ndim>* stride) {
int mdim = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
mdim += small[i] != big[i];
(*dims)[i] = (*stride)[i] = 1;
}
#pragma unroll
for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
if (small[i] != big[i]) {
--j;
(*stride)[j] = s;
(*dims)[j] = big[i];
}
s *= big[i];
}
return mdim;
}
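// Example: small = (1,3,1) vs. big = (2,3,4) gives mdim = 2 reduced axes,
// with dims = (2,4) and stride = (12,1) into big's row-major layout.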
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i)
ret += coord[i] * stride[i];
return ret;
}
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
if (addto) {
*dst += src;
} else {
*dst = src;
}
}
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const int idx, const bool addto,
const DType* __restrict lhs,
const DType* __restrict rhs, DType* out,
const Shape<ndim>& lshape, const Shape<ndim>& rshape,
const Shape<ndim>& oshape) {
const Shape<ndim> coord = unravel(idx, oshape);
const int j = ravel(coord, lshape);
const int k = ravel(coord, rshape);
assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
template<typename Reducer, int ndim, typename DType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
const DType* __restrict big, DType *small,
const Shape<ndim>& bshape, const Shape<ndim>& sshape,
const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
Shape<ndim> coord = unravel(idx, sshape);
int j = ravel(coord, bshape);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (int k = 0; k < M; ++k) {
coord = unravel(k, rshape);
Reducer::Reduce(val, OP::Map(big[j + dot(coord, rstride)]), residual);
}
assign(&small[idx], addto, val);
}
#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#else
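// CPU fallback path: each of the N output elements is handled independently
// (for reductions, accumulating its M mapped inputs), so the loop over N is
// the dimension that gets OpenMP-parallelized below.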
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const int N, const bool addto, const DType *lhs,
const DType *rhs, DType *out, const Shape<ndim> lshape,
const Shape<ndim> rshape, const Shape<ndim> oshape) {
for (int idx = 0; idx < N; ++idx) {
binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape);
}
}
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
if (req == kNullOp) return;
int N = out.shape_.Size();
binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(), rhs.dptr<DType>(),
out.dptr<DType>(), lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
out.shape_.get<ndim>());
}
template<typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute(const int N, const int M, const bool addto,
const DType *big, DType *small, const Shape<ndim> bshape,
const Shape<ndim> sshape, const Shape<ndim> rshape,
const Shape<ndim> rstride) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int idx = 0; idx < N; ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP>(idx, M, addto, big, small, bshape, sshape, rshape,
rstride);
}
}
template<typename Reducer, int ndim, typename DType, typename OP>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
int N = small.shape_.Size(), M = rshape.Size();
seq_reduce_compute<Reducer, ndim, DType, OP>(
N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(),
small.shape_.get<ndim>(), rshape, rstride);
}
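// The CPU reduction needs no scratch memory (the workspace argument is
// unused), so both workspace-size overloads report 0 bytes.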
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const TBlob& big) {
return 0;
}
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const TBlob& big, const TBlob& lhs, const TBlob& rhs) {
return 0;
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const int idx, const int M, const bool addto,
const DType* __restrict big, const DType* __restrict lhs,
const DType* __restrict rhs, DType *small,
const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0,
const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
const Shape<ndim>& rhs_stride) {
Shape<ndim> coord = unravel(idx, small_shape);
const int idx_big0 = ravel(coord, big_shape);
const int idx_lhs0 = ravel(coord, lhs_shape0);
const int idx_rhs0 = ravel(coord, rhs_shape0);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (int k = 0; k < M; ++k) {
Shape<ndim> coord_big = unravel(k, rshape);
int idx_big = idx_big0 + dot(coord_big, rstride);
Shape<ndim> coord_lhs = unravel(k, lhs_shape);
int idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
Shape<ndim> coord_rhs = unravel(k, rhs_shape);
int idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
}
assign(&small[idx], addto, val);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const int N, const int M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int idx = 0; idx < N; ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride);
}
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
const TBlob& rhs) {
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
int N = small.shape_.Size();
int M = rshape.Size();
Shape<ndim> lhs_shape, lhs_stride;
diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
Shape<ndim> rhs_shape, rhs_stride;
diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
N, M, req == kAddTo,
big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(),
rshape, rstride,
lhs_shape, lhs_stride,
rhs_shape, rhs_stride,
lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
cloud.c | #include <string>
#include <iostream>
#include <algorithm>
#include <utility>
#include <tfhe/tfhe.h>
#include <tfhe/tfhe_io.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <cassert>
#include <sys/time.h>
#include <omp.h>
#include <fstream>
using namespace std;
ifstream read;
#define T_FILE "averagestandard.txt"
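// add() is a ripple-carry adder over encrypted bits, built from the
// standard full-adder identities:
//   sum_i = x_i XOR y_i XOR carry
//   carry' = carry XOR ((x_i XOR carry) AND (y_i XOR carry))
// A truth table confirms the second line equals majority(x_i, y_i, carry),
// using a single AND gate per bit.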
void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y, const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset)
{
const LweParams *in_out_params = keyset->params->in_out_params;
LweSample *carry = new_LweSample_array(1, in_out_params);
LweSample *axc = new_LweSample_array(1, in_out_params);
LweSample *bxc = new_LweSample_array(1, in_out_params);
bootsCOPY(carry, c, keyset);
for(int32_t i = 0; i < nb_bits; i++)
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsXOR(axc, x + i, carry, keyset);
#pragma omp section
bootsXOR(bxc, y + i, carry, keyset);
}
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsXOR(sum + i, x + i, bxc, keyset);
#pragma omp section
bootsAND(axc, axc, bxc, keyset);
}
bootsXOR(carry, carry, axc, keyset);
}
bootsCOPY(carryover, carry, keyset);
delete_LweSample_array(1, carry);
delete_LweSample_array(1, axc);
delete_LweSample_array(1, bxc);
}
void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size)
{
for(size_t i = 0; i < size; i++){
bootsCONSTANT(result + i, 0, keyset);
}
}
void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size)
{
for(size_t i = 0; i < size; i++){
bootsNOT(result + i, x + i, keyset);
}
}
void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3, LweSample *a, LweSample *b, LweSample *c, LweSample *d,LweSample *e, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
const LweParams *in_out_params = keyset->params->in_out_params;
LweSample *sum = new_LweSample_array(32, in_out_params);
LweSample *sum2 = new_LweSample_array(32, in_out_params);
LweSample *sum3 = new_LweSample_array(32, in_out_params);
LweSample *carryover = new_LweSample_array(32, in_out_params);
LweSample *carryover2 = new_LweSample_array(32, in_out_params);
LweSample *carryover3 = new_LweSample_array(32, in_out_params);
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum + i, 0, keyset);
bootsCONSTANT(sum2 + i, 0, keyset);
bootsCONSTANT(sum3 + i, 0, keyset);
bootsCONSTANT(carryover + i, 0, keyset);
bootsCONSTANT(carryover2 + i, 0, keyset);
bootsCONSTANT(carryover3 + i, 0, keyset);
}
//accumulate the three partial results, propagating the carries upward
add(sum, carryover, e, b, carry, nb_bits, keyset);
add(sum2, carryover2, d, a, carryover, nb_bits, keyset);
add(sum3, carryover3, c, carryover2, carry, nb_bits, keyset);
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCOPY(finalresult + i, sum3 + i, keyset);
bootsCOPY(finalresult2 + i, sum2 + i, keyset);
bootsCOPY(finalresult3 + i, sum + i, keyset);
}
delete_LweSample_array(32, sum);
delete_LweSample_array(32, sum2);
delete_LweSample_array(32, sum3);
delete_LweSample_array(32, carryover);
delete_LweSample_array(32, carryover2);
delete_LweSample_array(32, carryover3);
}
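// mul32 is schoolbook (shift-and-add) multiplication on encrypted bits:
// each bit i of b is ANDed with all of a, the partial product is shifted
// left by i (the `round` bookkeeping splits it across two 32-bit limbs),
// and the shifted products are accumulated with add(). Plaintext sketch
// for illustration: a = 5, b = 3 (bits 0 and 1 set) gives partial
// products 5<<0 and 5<<1, and 5 + 10 = 15 = 5*3.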
void mul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
const LweParams *in_out_params = keyset->params->in_out_params;
//sum of the output
LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
LweSample *tmp = new_LweSample_array(32, in_out_params);
LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
LweSample *carry1 = new_LweSample_array(32, in_out_params);
LweSample *carry2 = new_LweSample_array(32, in_out_params);
//set all these to 0
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset);
bootsCONSTANT(tmp + i, 0, keyset);
bootsCONSTANT(tmp3c1 + i, 0, keyset);
bootsCONSTANT(tmp3c2 + i, 0, keyset);
bootsCONSTANT(carry1 + i, 0, keyset);
bootsCONSTANT(carry2 + i, 0, keyset);
}
//schoolbook multiplication: for each bit of b, AND it with every bit of a, shift, and accumulate
int round = 0;
for (int32_t i = 0; i < nb_bits; ++i)
{
for (int32_t k = 0; k < nb_bits; ++k)
{
//the AND gate forms the partial product of a with bit i of b
//a (ciphertext1) holds the least significant limb
bootsAND(tmp + k, a + k, b + i, keyset);
}
if (round > 0) {
for (int32_t i = 0; i < round; ++i) {
//zero the low `round` bits (this realises the left shift)
bootsCONSTANT(tmp3c1 + i, 0, keyset);
}
}
//copy the bits that still fit into this 32-bit limb, above the zero padding
for (int32_t i = 0; i < 32 - round; ++i)
{
//offset by round because the low bits hold the shift's zero padding
bootsCOPY(tmp3c1 + i + round, tmp + i, keyset);
}
//the high bits that overflow into the next limb
for (int32_t i = 0; i < round; ++i)
{
bootsCOPY(tmp3c2 + i, tmp + i + 32 - round, keyset);
}
add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
round++;
}
for (int32_t i = 0; i < 32; ++i)
{
bootsCOPY(result + i, sum3c2 + i, keyset);
bootsCOPY(result2 + i, sum3c1 + i, keyset);
}
delete_LweSample_array(32, sum3c1);
delete_LweSample_array(32, sum3c2);
delete_LweSample_array(32, tmp);
delete_LweSample_array(32, tmp3c1);
delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, carry1);
delete_LweSample_array(32, carry2);
}
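// mul64 extends the same shift-and-add scheme to a 64-bit value held in
// two limbs (a = low 32 bits, b = high 32 bits) times the 32-bit
// multiplier c; counter1/counter2 track how each shifted partial product
// straddles the three result limbs sum3c1..sum3c3.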
void mul64(LweSample *result, LweSample *result2,LweSample *result3, LweSample *a, LweSample *b,LweSample *c,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
const LweParams *in_out_params = keyset->params->in_out_params;
//sum of the output
LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
LweSample *tmp = new_LweSample_array(32, in_out_params);
LweSample *tmp2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
LweSample *carry1 = new_LweSample_array(32, in_out_params);
LweSample *carry2 = new_LweSample_array(32, in_out_params);
LweSample *carry3 = new_LweSample_array(32, in_out_params);
//set all these to 0
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset);
bootsCONSTANT(sum3c3 + i, 0, keyset);
bootsCONSTANT(tmp + i, 0, keyset);
bootsCONSTANT(tmp2 + i, 0, keyset);
bootsCONSTANT(tmp3c1 + i, 0, keyset);
bootsCONSTANT(tmp3c2 + i, 0, keyset);
bootsCONSTANT(tmp3c3 + i, 0, keyset);
bootsCONSTANT(carry1 + i, 0, keyset);
bootsCONSTANT(carry2 + i, 0, keyset);
bootsCONSTANT(carry3 + i, 0, keyset);
}
//schoolbook multiplication: for each bit of c, AND it with every bit of (a, b), shift, and accumulate
int round = 0;
int counter1 = 0;
int counter2 = 0;
for (int32_t i = 0; i < nb_bits; ++i)
{
for (int32_t k = 0; k < nb_bits; ++k)
{
//the AND gates form the partial products of both limbs with bit i of c
//a (ciphertext1) holds the least significant limb
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsAND(tmp + k, a + k, c + i, keyset);
#pragma omp section
bootsAND(tmp2 + k, b + k, c + i, keyset);
}
}
counter1 = 32 - round;
counter2 = 32 - counter1;
if (round > 0) {
for (int32_t i = 0; i < round; ++i) {
//zero the low `round` bits (this realises the left shift)
bootsCONSTANT(tmp3c1 + i, 0, keyset);
}
}
//copy the bits that still fit into this 32-bit limb, above the zero padding
//tmp to tmp3c1
for (int32_t i = 0; i < counter1; ++i)
{
//offset by round because the low bits hold the shift's zero padding
bootsCOPY(tmp3c1 + i + round, tmp + i, keyset);
}
//the remaining high bits of tmp overflow into tmp3c2
for (int32_t i = 0; i < counter2; ++i)
{
bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset);
}
//the low counter1 bits of tmp2 fill the remaining slots of tmp3c2
for (int32_t i = 0; i < counter1; ++i)
{
bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset);
}
//the remaining counter2 high bits of tmp2 go to tmp3c3
for (int32_t i = 0; i < counter2; ++i)
{
bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset);
}
add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
round++;
}
for (int32_t i = 0; i < 32; ++i)
{
bootsCOPY(result + i, sum3c3 + i, keyset);
bootsCOPY(result2 + i, sum3c2 + i, keyset);
bootsCOPY(result3 + i, sum3c1 + i, keyset);
}
delete_LweSample_array(32, sum3c1);
delete_LweSample_array(32, sum3c2);
delete_LweSample_array(32, sum3c3);
delete_LweSample_array(32, tmp);
delete_LweSample_array(32, tmp2);
delete_LweSample_array(32, tmp3c1);
delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, tmp3c3);
delete_LweSample_array(32, carry1);
delete_LweSample_array(32, carry2);
delete_LweSample_array(32, carry3);
}
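// mul128 repeats the scheme for a 128-bit value in four limbs
// (a = least significant .. d = most significant) times the 32-bit
// multiplier e, accumulating into the five result limbs sum3c1..sum3c5.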
void mul128(LweSample *result, LweSample *result2,LweSample *result3,LweSample *result4,LweSample *result5, LweSample *a, LweSample *b,LweSample *c,LweSample *d, LweSample *e,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
const LweParams *in_out_params = keyset->params->in_out_params;
//sum of the output
LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
LweSample *sum3c4 = new_LweSample_array(32, in_out_params);
LweSample *sum3c5 = new_LweSample_array(32, in_out_params);
LweSample *tmp = new_LweSample_array(32, in_out_params);
LweSample *tmp2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3 = new_LweSample_array(32, in_out_params);
LweSample *tmp4 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c5 = new_LweSample_array(32, in_out_params);
LweSample *carry1 = new_LweSample_array(32, in_out_params);
LweSample *carry2 = new_LweSample_array(32, in_out_params);
LweSample *carry3 = new_LweSample_array(32, in_out_params);
LweSample *carry4 = new_LweSample_array(32, in_out_params);
LweSample *carry5 = new_LweSample_array(32, in_out_params);
//set all these to 0
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset);
bootsCONSTANT(sum3c3 + i, 0, keyset);
bootsCONSTANT(sum3c4 + i, 0, keyset);
bootsCONSTANT(sum3c5 + i, 0, keyset);
bootsCONSTANT(tmp + i, 0, keyset);
bootsCONSTANT(tmp2 + i, 0, keyset);
bootsCONSTANT(tmp3 + i, 0, keyset);
bootsCONSTANT(tmp4 + i, 0, keyset);
bootsCONSTANT(tmp3c1 + i, 0, keyset);
bootsCONSTANT(tmp3c2 + i, 0, keyset);
bootsCONSTANT(tmp3c3 + i, 0, keyset);
bootsCONSTANT(tmp3c4 + i, 0, keyset);
bootsCONSTANT(tmp3c5 + i, 0, keyset);
bootsCONSTANT(carry1 + i, 0, keyset);
bootsCONSTANT(carry2 + i, 0, keyset);
bootsCONSTANT(carry3 + i, 0, keyset);
bootsCONSTANT(carry4 + i, 0, keyset);
bootsCONSTANT(carry5 + i, 0, keyset);
}
//schoolbook multiplication: for each bit of e, AND it with every bit of (a, b, c, d), shift, and accumulate
int round = 0;
int counter1 = 0;
int counter2 = 0;
for (int32_t i = 0; i < nb_bits; ++i)
{
for (int32_t k = 0; k < nb_bits; ++k)
{
//the AND gates form the partial products of the four limbs with bit i of e
//a (ciphertext1) holds the least significant limb
#pragma omp parallel sections num_threads(4)
{
#pragma omp section
bootsAND(tmp + k, a + k, e + i, keyset);
#pragma omp section
bootsAND(tmp2 + k, b + k, e + i, keyset);
#pragma omp section
bootsAND(tmp3 + k, c + k, e + i, keyset);
#pragma omp section
bootsAND(tmp4 + k, d + k, e + i, keyset);
}
}
counter1 = 32 - round;
counter2 = 32 - counter1;
if (round > 0) {
for (int32_t i = 0; i < round; ++i) {
//zero the low `round` bits (this realises the left shift)
bootsCONSTANT(tmp3c1 + i, 0, keyset);
}
}
//copy the bits that still fit into this 32-bit limb, above the zero padding
//tmp to tmp3c1
for (int32_t i = 0; i < counter1; ++i)
{
//offset by round because the low bits hold the shift's zero padding
bootsCOPY(tmp3c1 + i + round, tmp + i, keyset);
}
//the remaining high bits of tmp overflow into tmp3c2
for (int32_t i = 0; i < counter2; ++i)
{
bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset);
}
//the low bits of tmp2 fill the remaining slots of tmp3c2
for (int32_t i = 0; i < counter1; ++i)
{
bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset);
}
//the remaining high bits of tmp2 go to tmp3c3
for (int32_t i = 0; i < counter2; ++i)
{
bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset);
}
//the low bits of tmp3 fill the remaining slots of tmp3c3
for (int32_t i = 0; i < counter1; ++i)
{
bootsCOPY(tmp3c3 + i + counter2, tmp3 + i, keyset);
}
//the remaining high bits of tmp3 go to tmp3c4
for (int32_t i = 0; i < counter2; ++i)
{
bootsCOPY(tmp3c4 + i, tmp3 + i + counter1, keyset);
}
//the low bits of tmp4 fill the remaining slots of tmp3c4
for (int32_t i = 0; i < counter1; ++i)
{
bootsCOPY(tmp3c4 + i + counter2, tmp4 + i, keyset);
}
//the remaining high bits of tmp4 go to tmp3c5
for (int32_t i = 0; i < counter2; ++i)
{
bootsCOPY(tmp3c5 + i, tmp4 + i + counter1, keyset);
}
add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset);
add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset);
round++;
}
for (int32_t i = 0; i < 32; ++i)
{
bootsCOPY(result + i, sum3c5 + i, keyset);
bootsCOPY(result2 + i, sum3c4 + i, keyset);
bootsCOPY(result3 + i, sum3c3 + i, keyset);
bootsCOPY(result4 + i, sum3c2 + i, keyset);
bootsCOPY(result5 + i, sum3c1 + i, keyset);
}
delete_LweSample_array(32, sum3c1);
delete_LweSample_array(32, sum3c2);
delete_LweSample_array(32, sum3c3);
delete_LweSample_array(32, sum3c4);
delete_LweSample_array(32, sum3c5);
delete_LweSample_array(32, tmp);
delete_LweSample_array(32, tmp2);
delete_LweSample_array(32, tmp3);
delete_LweSample_array(32, tmp4);
delete_LweSample_array(32, tmp3c1);
delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, tmp3c3);
delete_LweSample_array(32, tmp3c4);
delete_LweSample_array(32, tmp3c5);
delete_LweSample_array(32, carry1);
delete_LweSample_array(32, carry2);
delete_LweSample_array(32, carry3);
delete_LweSample_array(32, carry4);
delete_LweSample_array(32, carry5);
}
int main() {
// dragonfly_cipher_cloud should have already appended two ciphertext streams into cloud.data
printf("Reading the key...\n");
// reads the cloud key from file
FILE* cloud_key = fopen("cloud.key", "rb");
TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key);
fclose(cloud_key);
// reads the nbit key from file
FILE* nbit_key = fopen("nbit.key","rb");
TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key);
fclose(nbit_key);
// if necessary, the params are inside the key
const TFheGateBootstrappingParameterSet* params = bk->params;
// if necessary, the params are inside the key
const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params;
// Create ciphertext blocks for negative1, bit1, negative2, bit2 and values
LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertext1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
printf("Reading input 1...\n");
// reads ciphertexts from cloud.data
FILE* cloud_data = fopen("cloud.data", "rb");
for (int i = 0; i<32; i++) // line0
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams);
for (int i = 0; i<32; i++) // line1
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams);
// Decrypts bit size1
int32_t int_bit1 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextbit1[i],nbitkey)>0;
int_bit1 |= (ai<<i); }
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params);
for (int i = 0; i<32; i++) // line10
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params);
printf("Reading input 2...\n");
for (int i = 0; i<32; i++) // line11
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams);
for (int i = 0; i<32; i++) // line12
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams);
// Decrypts bit size2
int32_t int_bit2 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextbit2[i],nbitkey)>0;
int_bit2 |= (ai<<i); }
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params);
for (int i = 0; i<32; i++) // line21
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params);
printf("Reading operation code...\n");
// Get Operation Code from File
int32_t int_op;
read.open("operator.txt");
read >> int_op;
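// Operation codes, as consumed by the branches below:
// 1 = addition, 2 = subtraction, 4 = multiplication.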
// Homomorphic encryption to add negative1 and negative2 ciphertexts
LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
// add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE
// Decrypts Negative1
int32_t int_negative1 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextnegative1[i],nbitkey)>0;
int_negative1 |= (ai<<i); }
std::cout << int_negative1 << " => negative1" << "\n";
// convert first value negativity code from 2 to 1
if (int_negative1 == 2){
int_negative1 = 1;}
// Decrypts Negative2
int32_t int_negative2 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextnegative2[i],nbitkey)>0;
int_negative2 |= (ai<<i); }
std::cout << int_negative2 << " => negative2" << "\n";
// Add Negatives.
// If both v1 & v2 are positive, int_negative = 0
// If only v1 is negative, int_negative = 1
// If only v2 is negative, int_negative = 2
// If both v1 & v2 are negative, int_negative = 3
int32_t int_negative;
int_negative = (int_negative1 + int_negative2);
// std::cout << int_negative << " -> negatives" << "\n";
//export the negative and bit data for the verif
FILE* answer_data = fopen("answer.data", "wb");
// Write negative to answer.data
int32_t ciphernegative = 0;
if (int_negative == 1){
ciphernegative = 1;
}
if (int_negative == 2){
ciphernegative = 2;
}
if (int_negative == 3){
ciphernegative = 4;
}
for (int i=0; i<32; i++) {
bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative>>i)&1, nbitkey);
}
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams);
std::cout << ciphernegative << " => total negatives" << "\n";
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative);
// Compare bit sizes
int32_t int_bit = 0;
if (int_op == 4){
if (int_bit1 >= int_bit2){int_bit = (int_bit1 * 2);}
else{int_bit = (int_bit2 * 2);}
for (int i=0; i<32; i++) {
bootsSymEncrypt(&ciphertextbit[i], (int_bit>>i)&1, nbitkey);}
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams);
std::cout << int_bit << " written to answer.data" << "\n";
if (int_bit1 >= int_bit2){int_bit = int_bit1;}
else{int_bit = int_bit2;}
}
else if (int_bit1 >= int_bit2) {
int_bit = int_bit1;
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams);
std::cout << int_bit << " written to answer.data" << "\n";
}
else{
int_bit = int_bit2;
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams);
std::cout << int_bit << " written to answer.data" << "\n";
}
fclose(cloud_data);
// Multiplying a 256-bit (or larger) value is not supported
if ((int_op == 4) && (int_bit >= 256)){
std::cout << "Cannot multiply a 256-bit number!" << "\n";
fclose(answer_data);
return 126;
}
// Addition
//if (the operation is add AND (both numbers are positive OR both numbers are negative)) OR (the operation is subtract AND either number is negative)
// A+B, [(-A)+(-B)], A-(-B), (-A)-(B)
if ((int_op == 1 && (int_negative != 1 && int_negative != 2 )) || (int_op == 2 && (int_negative == 1 || int_negative == 2))) {
if (int_op == 1){
std::cout << int_bit << " bit Addition computation" << "\n";
}else{
std::cout << int_bit << " bit Subtraction computation" << "\n";
}
//32 Bit Addition
if (int_bit == 32)
{
// Ciphertext to hold the result and carry
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
printf("Doing the homomorphic computation...\n");
//Adding component
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
// Timings
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// export the result ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
printf("writing the answer to file...\n");
fclose(answer_data);
//Clean up
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//64 Bit Addition
if (int_bit == 64)
{
//Ciphertext to hold the result and carry
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
printf("Doing the homomorphic computation...\n");
//Adding component
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
//Clean up
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//128 Bit Addition
if (int_bit == 128)
{
//Ciphertext to hold the result and carry
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
printf("Doing the homomorphic computation...\n");
//Adding component
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
// Timing
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
//Clean up
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//256 Bit Addition
if (int_bit == 256)
{
// do some operations on the ciphertexts: here, we will compute the
// addition of the two
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
// Timing
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk);
add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk);
add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk);
add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts (9 blocks of 32 bits) to a file (for the cloud)
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
// Subtraction
// If the operation is subtract OR (the operation is addition AND either one of the values is negative): A-B, A+(-B), (-A)+B
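// The subtraction paths below use the two's-complement identity
// a - b == a + (~b + 1) (mod 2^n). Worked plaintext example with n = 4:
// 5 - 3 = 5 + (~3 + 1) = 5 + 13 = 18, and 18 mod 16 = 2.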
else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))){
// Normal Subtraction computation with no negative numbers A-B OR Addition with 2nd number negative A+(-B)
if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)){
if (int_op == 2){
std::cout << int_bit << " bit Subtraction computation" << "\n";
}else {
std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n";
}
//32 Bit Subtraction
if(int_bit == 32)
{
printf("Doing the homomorphic computation...\n");
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1: invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
//initialize temp and the temporary carry to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
//set temp to 1: the +1 step of the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
//Add the first value to the two's complement of the second: a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts (9 blocks of 32 bits) to a file (for the cloud)
for (int i=0; i<32; i++) //result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//64 Bit Subtraction
if(int_bit == 64)
{
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1: invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
//initialize temp and the temporary carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
//set temp to 1: the +1 step of the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Add the rest of the inverted
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
//Add the first value to the two's complement of the second: a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts (9 blocks of 32 bits) to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//128 Bit Subtraction
if(int_bit == 128)
{
printf("Doing the homomorphic computation...\n");
//compute the difference of the two values via two's complement
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1: invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
NOT(inverse3, ciphertext11, bk, 32);
NOT(inverse4, ciphertext12, bk, 32);
//initialize temp and the temporary carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
//set temp to 1: the +1 step of the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Add the rest of the inverted
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
//Add the first value to the two's complement of the second: a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts (9 blocks of 32 bits) to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//256 Bit Subtraction
if (int_bit == 256)
{
printf("Doing the homomorphic computation...\n");
//do some operations on the ciphertexts: here, we will compute the
//difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
NOT(inverse3, ciphertext11, bk, 32);
NOT(inverse4, ciphertext12, bk, 32);
NOT(inverse5, ciphertext13, bk, 32);
NOT(inverse6, ciphertext14, bk, 32);
NOT(inverse7, ciphertext15, bk, 32);
NOT(inverse8, ciphertext16, bk, 32);
//initialize temp and the tempcarry arrays to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
//Set temp to 1 for the two's-complement +1 step
bootsCONSTANT(temp, 1, bk);
//Add 1 to the lowest inverted chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Propagate the +1 carry through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the first value to the two's complement of the second, i.e. a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("Writing the answer to file...\n");
//export the nine 32-ciphertext blocks to a file (for the cloud)
for (int i=0; i<32; i++) //result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) //result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) //result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) //result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) //result5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) //result6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++) //result7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++) //result8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
//Subtraction implemented as addition with the first value negated: (-A) + B
else{
if (int_op == 2){
std::cout << int_bit << " bit Subtraction computation" << "\n";
}else {
std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n";
}
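//These branches mirror the a + (-b) path above but negate the first operand
//instead, so the circuit effectively computes (-a) + b = b - a.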
if(int_bit == 32){
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
//initialize temp and tempcarry1 to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
//Set temp to 1 for the two's-complement +1 step
bootsCONSTANT(temp, 1, bk);
//Add 1 to the inverted chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the two's complement of the first value to the second, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 64){
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
//initialize temp and the tempcarry arrays to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
//Set temp to 1 for the two's-complement +1 step
bootsCONSTANT(temp, 1, bk);
//Add 1 to the lowest inverted chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Propagate the +1 carry through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the two's complement of the first value to the second, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) //result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 128){
printf("Doing the homomorphic computation...\n");
//do some operations on the ciphertexts: here, we will compute the
//difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
//initialize temp and the tempcarry arrays to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
//Set temp to 1 for the two's-complement +1 step
bootsCONSTANT(temp, 1, bk);
//Add 1 to the lowest inverted chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Propagate the +1 carry through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the two's complement of the first value to the second, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 256){
printf("Doing the homomorphic computation...\n");
//do some operations on the ciphertexts: here, we will compute the
//difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
NOT(inverse5, ciphertext5, bk, 32);
NOT(inverse6, ciphertext6, bk, 32);
NOT(inverse7, ciphertext7, bk, 32);
NOT(inverse8, ciphertext8, bk, 32);
//initialize temp and the tempcarry arrays to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
//Set temp to 1 for the two's-complement +1 step
bootsCONSTANT(temp, 1, bk);
//Add 1 to the lowest inverted chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Propagate the +1 carry through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the two's complement of the first value to the second, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("Writing the answer to file...\n");
//export the nine 32-ciphertext blocks to a file (for the cloud)
for (int i=0; i<32; i++) // 1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
// If Multiplication
else if (int_op == 4){
std::cout << int_bit << " bit Multiplication computation" << "\n";
if (int_bit == 128){
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
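//Schoolbook multiplication over 32-bit limbs (a sketch of the scheme, on the
//assumption that mul128() multiplies the four-limb value A by one 32-bit
//limb of B and returns a five-limb partial product): each successive partial
//product is offset by one more limb, and the add() ladder below accumulates
//the overlapping limbs, threading each carry block into the next add().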
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//partial product of A with the first 32-bit chunk of B (ciphertext9)
mul128(result1, result2, result3, result4, result5, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext9, ciphertextcarry1, 32, bk);
//partial product of A with the second 32-bit chunk of B (ciphertext10)
mul128(result6, result7, result8, result9, result10, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10, ciphertextcarry1, 32, bk);
//partial product of A with the third 32-bit chunk of B (ciphertext11)
mul128(result11, result12, result13, result14, result15, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext11, ciphertextcarry1, 32, bk);
//partial product of A with the fourth 32-bit chunk of B (ciphertext12)
mul128(result16, result17, result18, result19, result20, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext12, ciphertextcarry1, 32, bk);
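//Accumulate the shifted partial products limb by limb; each add() consumes
//the carry block produced by the previous one so that overflow propagates
//across the whole accumulation.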
add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk);
add(sum2, carryover2, result9, result3,carryover1,32, bk);
add(sum3, carryover3, result8, result2,carryover2,32, bk);
add(sum4, carryover4, result7, result1,carryover3,32, bk);
add(sum5, carryover5, result6, ciphertextcarry1,carryover4,32, bk);
add(sum6, carryover6, sum2, result15,carryover5,32, bk);
add(sum7, carryover7, sum3, result14,carryover6,32, bk);
add(sum8, carryover8, sum4, result13,carryover7,32, bk);
add(sum9, carryover9, sum5, result12,carryover8,32, bk);
add(sum10, carryover10, result11, ciphertextcarry1,carryover9,32, bk);
add(sum11, carryover11, sum7, result20,carryover10,32, bk);
add(sum12, carryover12, sum8, result19,carryover11,32, bk);
add(sum13, carryover13, sum9, result18,carryover12,32, bk);
add(sum14, carryover14, sum10, result17,carryover13,32, bk);
add(sum15, carryover15, result16 , ciphertextcarry1,carryover14,32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // output block 1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) // output block 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params);
for (int i=0; i<32; i++) // output block 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params);
for (int i=0; i<32; i++) // output block 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params);
for (int i=0; i<32; i++) // output block 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params);
for (int i=0; i<32; i++) // output block 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params);
for (int i=0; i<32; i++) // output block 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params);
for (int i=0; i<32; i++) // output block 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, result9);
delete_gate_bootstrapping_ciphertext_array(32, result10);
delete_gate_bootstrapping_ciphertext_array(32, result11);
delete_gate_bootstrapping_ciphertext_array(32, result12);
delete_gate_bootstrapping_ciphertext_array(32, result13);
delete_gate_bootstrapping_ciphertext_array(32, result14);
delete_gate_bootstrapping_ciphertext_array(32, result15);
delete_gate_bootstrapping_ciphertext_array(32, result16);
delete_gate_bootstrapping_ciphertext_array(32, result17);
delete_gate_bootstrapping_ciphertext_array(32, result18);
delete_gate_bootstrapping_ciphertext_array(32, result19);
delete_gate_bootstrapping_ciphertext_array(32, result20);
delete_gate_bootstrapping_ciphertext_array(32, sum1);
delete_gate_bootstrapping_ciphertext_array(32, sum2);
delete_gate_bootstrapping_ciphertext_array(32, sum3);
delete_gate_bootstrapping_ciphertext_array(32, sum4);
delete_gate_bootstrapping_ciphertext_array(32, sum5);
delete_gate_bootstrapping_ciphertext_array(32, sum6);
delete_gate_bootstrapping_ciphertext_array(32, sum7);
delete_gate_bootstrapping_ciphertext_array(32, sum8);
delete_gate_bootstrapping_ciphertext_array(32, sum9);
delete_gate_bootstrapping_ciphertext_array(32, sum10);
delete_gate_bootstrapping_ciphertext_array(32, sum11);
delete_gate_bootstrapping_ciphertext_array(32, sum12);
delete_gate_bootstrapping_ciphertext_array(32, sum13);
delete_gate_bootstrapping_ciphertext_array(32, sum14);
delete_gate_bootstrapping_ciphertext_array(32, sum15);
delete_gate_bootstrapping_ciphertext_array(32, carryover1);
delete_gate_bootstrapping_ciphertext_array(32, carryover2);
delete_gate_bootstrapping_ciphertext_array(32, carryover3);
delete_gate_bootstrapping_ciphertext_array(32, carryover4);
delete_gate_bootstrapping_ciphertext_array(32, carryover5);
delete_gate_bootstrapping_ciphertext_array(32, carryover6);
delete_gate_bootstrapping_ciphertext_array(32, carryover7);
delete_gate_bootstrapping_ciphertext_array(32, carryover8);
delete_gate_bootstrapping_ciphertext_array(32, carryover9);
delete_gate_bootstrapping_ciphertext_array(32, carryover10);
delete_gate_bootstrapping_ciphertext_array(32, carryover11);
delete_gate_bootstrapping_ciphertext_array(32, carryover12);
delete_gate_bootstrapping_ciphertext_array(32, carryover13);
delete_gate_bootstrapping_ciphertext_array(32, carryover14);
delete_gate_bootstrapping_ciphertext_array(32, carryover15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 64){
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
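//A sketch of the 64-bit scheme, assuming mul64() multiplies the two-limb
//value A by one 32-bit limb of B into a three-limb partial product and
//split() aligns and sums the two partial products into the final output
//limbs.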
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//partial product of A with the first 32-bit chunk of B (ciphertext9)
mul64(result1, result2, result3, ciphertext1, ciphertext2, ciphertext9, ciphertextcarry1, 32, bk);
//partial product of A with the second 32-bit chunk of B (ciphertext10)
mul64(result4, result5, result6, ciphertext1, ciphertext2, ciphertext10, ciphertextcarry1, 32, bk);
//combine the two partial products into the final result limbs
split(finalresult, finalresult2, finalresult3, result1, result2, result4, result5, result6, ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // output block 1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // output block 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params);
for (int i=0; i<32; i++) // output block 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params);
for (int i=0; i<32; i++) // output block 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_ciphertext_array(32, finalresult);
delete_gate_bootstrapping_ciphertext_array(32, finalresult2);
delete_gate_bootstrapping_ciphertext_array(32, finalresult3);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 32){
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
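//A sketch, assuming mul32() multiplies two 32-bit encrypted values and
//returns the product as a high word (result1) and a low word (result2).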
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//multiply the two 32-bit values
mul32(result1, result2, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // output block 1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // output block 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
|
mssql12_fmt_plug.c | /* Modified in August, 2012 by Dhiru Kholia (dhiru at openwall.com) for MS SQL 2012
*
* This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Modified by Mathieu Perrin (mathieu at tpfh.org) 09/06
* Microsoft MS-SQL05 password cracker
*
* UTF-8 support by magnum 2011, same terms as above
*
* Creating MS SQL 2012 hashes:
*
* sqlcmd -L
* sqlcmd -S <server> -U sa -P <password>
* 1> select pwdencrypt("openwall")
* 2> go
*
* Dumping hashes from MS SQL server 2012:
*
* sqlcmd -S <server> -U sa -P <password>
* 1> select * from sys.sql_logins
* 2> go
*/
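/*
 * Hash layout as parsed below (an observation from valid(), get_salt() and
 * get_binary(), not from official documentation): "0x0200" + 8 hex chars of
 * salt + 128 hex chars of SHA-512(UTF-16LE(password) . salt), giving
 * CIPHERTEXT_LENGTH = 6 + 8 + 128 = 142 characters.
 */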
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mssql12;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mssql12);
#else
#include <string.h>
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "sha2.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // tuned K8-dual HT
#endif
#endif
#endif
#define FORMAT_LABEL "mssql12"
#define FORMAT_NAME "MS SQL 2012/2014"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH ((111 - SALT_SIZE) / 2)
#define CIPHERTEXT_LENGTH (54 + 44 * 2)
#define BINARY_SIZE 8
#define DIGEST_SIZE 64
#define BINARY_ALIGN 8
#define SALT_SIZE 4
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifndef SHA_BUF_SIZ
#define SHA_BUF_SIZ 16
#endif
static struct fmt_tests tests[] = {
{"0x0200F733058A07892C5CACE899768F89965F6BD1DED7955FE89E1C9A10E27849B0B213B5CE92CC9347ECCB34C3EFADAF2FD99BFFECD8D9150DD6AACB5D409A9D2652A4E0AF16", "Password1!"},
{"0x0200AB3E1F9028A739EEF62ABF672427276A32D5EDD349E638E7F2CD81DAA247CFE20EE4E3B0A30B2D0AE3C3FA010E61752F1BF45E045041F1B988C083C7F118527E3E5F0562", "openwall"},
/* hashes from https://hashcat.net/forum */
{"0x02006BF4AB05873FF0C8A4AFD1DC5912CBFDEF62E0520A3353B04E1184F05C873C9C76BBADDEAAC1E9948C7B6ABFFD62BFEFD7139F17F6AFE10BE0FEE7A178644623067C2423", "carlos"},
{"0x0200935819BA20F1C7289CFF2F8FF9F0E40DA5E6D04986F988CFE6603DA0D2BC0160776614763198967D603FBD8C103151A15E70D18E7B494C7F13F16804A7A4EB206084E632", "test"},
{"0x0200570AC969EF7C6CCB3312E8BEDE1D635EB852C06496957F0FA845B20FCD1C7C457474A5B948B68C47C2CB704D08978871F532C9EB11199BB5F56A06AC915C3799DB8A64C1", "test1"},
{"0x0200A56045DBCD848E297FA8D06E7579D62B7129928CA0BC5D232A7320972EF5A5455C01411B8D3A7FF3D18A55058A12FAEE5DA410AFE6CE61FF5C39E5FF57CD3EDD57DB1C3B", "test2"},
{"0x020059799F1B6D897BE2C5A76D3FFDC52B308190E82FA01F2FA51129B4863A7EE21B3FF6FE9F7850976045237805F338DD36DC9345B429F47A402614C6F2F2B02C56DF14C4F4", "Paul"},
{"0x0200881E2999DD8E3583695F405696257B99559953705A34D774C15AC1D42699BB77BC56DB5F657751335C1B350890E643790553B60329CAE7A2E7D3C04CF8856C4DB0058723", "DBAmaster"},
{"0x0200D648446E70180A6DFB6DF14DB38623EBFE490FE445751900FD5DC45A2B5D20D7AFFE8C6FFC2890BAE1AF34430A21F2F1E4DE50E25757FDB4789716D8D85C6985A00BC454", "database"},
{"0x02008AC3B9DC7B67EF9D3C1D25D8007A4B957D5BD61D71E5E9DA08D9F8F012EDDAD168E1CADD93D4627433FBFEE8BCF6CBB42D5B9A31886FC5FF7F970B164F4B5815E03D6DE7", "jhl9mqe5"},
{"0x020094C4D05A082DB1362B1A972C5D5F1C04C527090A7427E93C13AFEC705A011D8980E994FA647C7D44E25A427246218E25674571DB1710E49C713FB17129549C29E303086A", "coldfusion"},
{"0x0200B9BD5C85918D9BEE84417957618FBA1CB80B71E81550FAE09AD027B4089017CD6461D8EC9509873C2D5096CDBE8F16E4EFA9035C35F9F4917CE58DB99DC6836CEA7483A7", "sql2005"},
{NULL}
};
static unsigned char cursalt[SALT_SIZE];
#ifdef SIMD_COEF_64
static uint64_t (*saved_key)[SHA_BUF_SIZ];
static uint64_t (*crypt_out);
static int max_keys;
static int new_keys;
#else
static char (*saved_key)[(PLAINTEXT_LENGTH + 1) * 2 + SALT_SIZE];
static uint64_t (*crypt_out)[DIGEST_SIZE / 8];
static int *saved_len;
#endif
static int valid(char *ciphertext, struct fmt_main *self)
{
int i;
if (strncmp(ciphertext, "0x0200", 6))
return 0;
if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
return 0;
for (i = 6; i < CIPHERTEXT_LENGTH; i++) {
if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
//(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) ||
(('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
return 0;
}
return 1;
}
static void set_salt(void *salt)
{
memcpy(cursalt, salt, SALT_SIZE);
#ifdef SIMD_COEF_64
new_keys = 1;
#endif
}
static void *get_salt(char *ciphertext)
{
static unsigned char *out2;
int l;
if (!out2) out2 = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
for (l = 0;l<SALT_SIZE;l++)
{
out2[l] = atoi16[ARCH_INDEX(ciphertext[l*2+6])]*16
+ atoi16[ARCH_INDEX(ciphertext[l*2+7])];
}
return out2;
}
static void set_key_enc(char *_key, int index);
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
sizeof(*saved_key),
MEM_ALIGN_SIMD);
crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
8 * sizeof(uint64_t),
MEM_ALIGN_SIMD);
max_keys = self->params.max_keys_per_crypt;
#else
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
#endif
if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
if (options.target_enc != ISO_8859_1 &&
options.target_enc != ASCII)
self->methods.set_key = set_key_enc;
}
static void done(void)
{
#ifndef SIMD_COEF_64
MEM_FREE(saved_len);
#endif
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
#ifdef SIMD_COEF_64
static void clear_keys(void)
{
memset(saved_key, 0, sizeof(*saved_key) * max_keys);
}
#endif
static void set_key(char *_key, int index)
{
#ifndef SIMD_COEF_64
/* ASCII or ISO-8859-1 to UCS-2 */
UTF8 *s = (UTF8*)_key;
UTF16 *d = (UTF16*)saved_key[index];
for (saved_len[index] = 0; s[saved_len[index]]; saved_len[index]++)
#if ARCH_LITTLE_ENDIAN
d[saved_len[index]] = s[saved_len[index]];
#else
d[saved_len[index]] = s[saved_len[index]] << 8;
#endif
d[saved_len[index]] = 0;
saved_len[index] <<= 1;
#else
uint64_t *keybuffer = saved_key[index];
unsigned short *w16 = (unsigned short*)keybuffer;
UTF8 *key = (UTF8*)_key;
int len = 0;
while ((*w16++ = *key++))
len++;
keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
new_keys = 1;
#if !ARCH_LITTLE_ENDIAN
alter_endianity_w16(saved_key[index], len<<1);
#endif
#endif
}
static void set_key_enc(char *_key, int index)
{
#ifndef SIMD_COEF_64
/* Any encoding -> UTF-16 */
saved_len[index] = enc_to_utf16((UTF16*)saved_key[index],
PLAINTEXT_LENGTH,
(unsigned char*)_key, strlen(_key));
if (saved_len[index] < 0)
saved_len[index] = strlen16((UTF16*)saved_key[index]);
saved_len[index] <<= 1;
#else
uint64_t *keybuffer = saved_key[index];
UTF16 *w16 = (UTF16*)keybuffer;
UTF8 *key = (UTF8*)_key;
int len;
len = enc_to_utf16(w16, PLAINTEXT_LENGTH, key, strlen(_key));
if (len < 0)
len = strlen16(w16);
keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
new_keys = 1;
#endif
}
static char *get_key(int index)
{
#ifndef SIMD_COEF_64
((UTF16*)saved_key[index])[saved_len[index]>>1] = 0;
return (char*)utf16_to_enc((UTF16*)saved_key[index]);
#else
uint64_t *keybuffer = saved_key[index];
UTF16 *w16 = (UTF16*)keybuffer;
static UTF16 out[PLAINTEXT_LENGTH + 1];
unsigned int i, len;
len = ((keybuffer[15] >> 3) - SALT_SIZE) >> 1;
for (i = 0; i < len; i++)
out[i] = w16[i];
out[i] = 0;
return (char*)utf16_to_enc(out);
#endif
}
static void *get_binary(char *ciphertext)
{
static uint64_t out[SHA_BUF_SIZ];
char *realcipher = (char*)out;
int i;
for (i = 0;i<DIGEST_SIZE;i++)
realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+14])]*16 +
atoi16[ARCH_INDEX(ciphertext[i*2+15])];
#ifdef SIMD_COEF_64
#if ARCH_LITTLE_ENDIAN==1
alter_endianity_to_BE64 (realcipher, DIGEST_SIZE/8);
#endif
#ifdef REVERSE_STEPS
sha512_reverse(out);
#endif
#endif
return (void *)realcipher;
}
#define BASE_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
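/*
 * BASE_IDX (a reading of the formula above): keys are stored interleaved for
 * SIMD; lane = index % SIMD_COEF_64 within a group of SIMD_COEF_64 keys, and
 * each group occupies 8 * SIMD_COEF_64 64-bit words of crypt_out.
 */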
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || PLAINTEXT_LENGTH > 1
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_64
if (new_keys) {
int i;
for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
uint64_t *keybuffer = saved_key[index + i];
unsigned char *wucp = (unsigned char*)keybuffer;
int j, len = (keybuffer[15] >> 3) - SALT_SIZE;
if (len >= 0)
for (j = 0; j < SALT_SIZE; j++)
wucp[len + j] = cursalt[j];
wucp[len + 4] = 0x80;
}
}
SIMDSHA512body(&saved_key[index], &crypt_out[BASE_IDX], NULL, SSEi_REVERSE_STEPS | SSEi_FLAT_IN);
#else
SHA512_CTX ctx;
memcpy(saved_key[index]+saved_len[index], cursalt, SALT_SIZE);
SHA512_Init(&ctx );
SHA512_Update(&ctx, saved_key[index], saved_len[index]+SALT_SIZE );
SHA512_Final((unsigned char *)crypt_out[index], &ctx);
#endif
}
#ifdef SIMD_COEF_64
new_keys = 0;
#endif
return count;
}
#define COMMON_GET_HASH_SIMD64 8
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
static int binary_hash_0(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_6; }
static int cmp_all(void *binary, int count)
{
unsigned int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
if (((uint64_t*)binary)[0] == crypt_out[HASH_IDX])
return 1;
#else
if ( ((uint64_t*)binary)[0] == crypt_out[index][0] )
return 1;
#endif
return 0;
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
return (((uint64_t*)binary)[0] == crypt_out[HASH_IDX]);
#else
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
uint64_t *binary = get_binary(source);
#if SIMD_COEF_64
char *key = get_key(index);
UTF16 wkey[PLAINTEXT_LENGTH];
SHA512_CTX ctx;
uint64_t crypt_out[DIGEST_SIZE / sizeof(uint64_t)];
int len;
len = enc_to_utf16(wkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
if (len < 0)
len = strlen16(wkey);
len *= 2;
SHA512_Init(&ctx);
SHA512_Update(&ctx, wkey, len);
SHA512_Update(&ctx, cursalt, SALT_SIZE);
SHA512_Final((unsigned char*)crypt_out, &ctx);
#if ARCH_LITTLE_ENDIAN==1
alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8);
#endif
#ifdef REVERSE_STEPS
sha512_reverse(crypt_out);
#endif
return !memcmp(binary, crypt_out, DIGEST_SIZE);
#else
return !memcmp(binary, crypt_out[index], DIGEST_SIZE);
#endif
}
static int salt_hash(void *salt)
{
// The >> 8 gave much better distribution on a huge set I analysed
// although that was mssql05
return (*((uint32_t *)salt) >> 8) & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_mssql12 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
#ifdef SIMD_COEF_64
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
irbuilder_unroll_unroll_partial_factor.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_partial_factor_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 2
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 2
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]]
// CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]]
// CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 2
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 2, %[[TMP12]]
// CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP19]], %[[TMP22]]
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP25]]
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
void unroll_partial_factor_for(float *a, float *b, float *c, float *d) {
#pragma omp for
#pragma omp unroll partial(2)
for (int i = 0; i < 2; i++) {
a[i] = b[i] * c[i] * d[i];
}
}
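// What the CHECK lines above encode: "#pragma omp unroll partial(2)" becomes a
// floor loop over ceil(trip count / 2) chunks, workshared via
// __kmpc_for_static_init_4u, plus an inner tile loop of at most 2 iterations;
// the select on the last chunk handles the odd remainder.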
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 2}
|
dist.c | //#include <mpi.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/time.h>
#include <omp.h>
#define MB 1024*1024
// Local wall-clock based on gettimeofday(); note this shadows the
// omp_get_wtime() declared in <omp.h>.
double omp_get_wtime() {
struct timeval currentTime;
gettimeofday(&currentTime, NULL);
return (double)(currentTime.tv_sec) + (1.0E-06)*(double)(currentTime.tv_usec);
}
char **split(char *str, char div, int num_args);
char *space = "\n-------------------------------------------------\n";
typedef enum disturbance_mode_t {
compute = 0,
memory = 1,
communication = 2,
} disturbance_mode;
disturbance_mode d_mode = compute;
int window_comp = 10;
int window_pause = 5;
int window_size_min = 1;
int window_size_max = 10;
bool use_random = false;
int rank_number = 0;
int use_multiple_cores = 1;
int use_ram = 1;
int seed = 0;
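/* Disturbance kernels (summary of the code below): compute_kernel() spins on
 * a small floating-point loop for ~window_comp seconds, then sleeps
 * window_pause seconds, forever (window_pause == 0 means never sleep).
 * memory_kernel() does the same with memset/memcpy over two buffers of
 * use_ram/use_multiple_cores/2 MB each. communication_kernel() is a stub. */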
void compute_kernel()
{
int i, j, v;
int n = 500000000;
int length = 8;
float a[length];
float c[length];
for (i = 0; i < length; i++)
{
a[i] = i;
c[i] = 1.0+i*0.01;
}
if (window_pause == 0) {
while(true)
{
for (i = 0; i < n; i++)
for (v = 0; v < length; v++)
a[v]=a[v]+c[v]*i;
for (v = 0; v < length; v++)
a[v]=(double)(((int)a[v])%length);
}
}
double passed_time = 0.0;
while(true)
{
double tmp_time = omp_get_wtime();
for (i = 0; i < n; i++)
for (v = 0; v < length; v++)
a[v]=a[v]+c[v]*i;
for (v = 0; v < length; v++)
a[v]=(double)(((int)a[v])%length);
passed_time += omp_get_wtime()-tmp_time;
//printf("%f\n",passed_time);
if(passed_time > window_comp)
{
//printf("go to sleep\n");
passed_time -= window_comp;
sleep(window_pause);
//printf("wake up\n");
}
}
}
void memory_kernel()
{
long long size_of_ram_usage = use_ram/use_multiple_cores/2;
printf ("Allocating %dMB of RAM\n",size_of_ram_usage*2);
void *p = malloc(size_of_ram_usage*MB);
void *q = malloc(size_of_ram_usage*MB);
int i = 0;
if (window_pause == 0) {
memset(p, 1, size_of_ram_usage*MB);
memcpy(q, p, size_of_ram_usage*MB);
while(true)
{
memset(p, i, size_of_ram_usage*MB);
memcpy(q, p, size_of_ram_usage*MB);
//sleep(1);
if (i == 0)
i = 1;
else
i = 0;
}
return;
}
double passed_time = 0.0;
while(true)
{
double tmp_time = omp_get_wtime();
memset(p, i, size_of_ram_usage*MB);
memcpy(q, p, size_of_ram_usage*MB);
passed_time += omp_get_wtime()-tmp_time;
//printf("Time Passed: %f\n",passed_time);
if (i == 0)
i = 1;
else
i = 0;
if(passed_time > window_comp)
{
passed_time -= window_comp;
free(p);
free(q);
sleep(window_pause);
p = malloc(size_of_ram_usage*MB);
q = malloc(size_of_ram_usage*MB);
}
}
}
void communication_kernel()
{
while(true)
{
}
}
int main(int argc, char *argv[])
{
int i, k;
int iMyRank, iNumProcs;
int provided;
/*int requested = MPI_THREAD_MULTIPLE;
MPI_Init_thread(&argc, &argv, requested, &provided);
MPI_Comm_size(MPI_COMM_WORLD, &iNumProcs);
MPI_Comm_rank(MPI_COMM_WORLD, &iMyRank);*/
for (i = 1; i < argc; i++)
{
char **s = split(argv[i], '=', 2);
if(strcmp(s[0], "--type") == 0)
{
if(strcmp(s[1], "compute") == 0)
{
d_mode = 0;
}
else if(strcmp(s[1], "memory") == 0)
{
d_mode = 1;
}
else if(strcmp(s[1], "communication") == 0)
{
d_mode = 2;
} else
{
printf(space);
printf("--type was not set correctly!\n");
printf("please try one of the following:\n");
printf("\tcompute\n\tmemory\n\tcommunication");
printf(space);
continue;
}
printf("--type is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--window_comp") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0) // compare strings with strcmp; `s[1] != "0"` compared pointers
{
printf(space);
printf("--window_comp was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
window_comp = a;
printf("--window_comp is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--window_size_max") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0)
{
printf(space);
printf("--window_size_max was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
window_size_max = a;
printf("--window_size_max is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--window_size_min") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0)
{
printf(space);
printf("--window_size_min was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
window_size_min = a;
printf("--window_size_min is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--window_pause") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0)
{
printf(space);
printf("--window_pause was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
window_pause = a;
printf("--window_pause is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--use_multiple_cores") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0)
{
printf(space);
printf("--use_multiple_cores was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
use_multiple_cores = a;
printf("--use_multiple_cores is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--use_random") == 0)
{if(strcmp(s[1], "true") == 0)
{
use_random = true;
}
else if(strcmp(s[1], "false") == 0){
use_random = false;
}
else
{
printf(space);
printf("--use_random was not set correctly!\n");
printf("please try one of the following:\n");
printf("\ttrue\n\tfalse");
printf(space);
continue;
}
printf("--use_random is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--rank_number") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0)
{
printf(space);
printf("--rank_number was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
rank_number = a;
printf("--rank_number is set to: %s\n", s[1]);
}
else if(strcmp(s[0], "--use_ram") == 0)
{
int a = atoi(s[1]);
if ((a == 0 && strcmp(s[1], "0") != 0) || a < 0)
{
printf(space);
printf("--use_ram was not set correctly!\n");
printf("please only use positive integers!");
printf(space);
continue;
}
use_ram = a;
printf("--use_ram is set to: %s\n", s[1]);
}
else if(strcmp(argv[i], "--help") == 0)
{
printf("Possible arguments are:\n");
printf("\t--type\n");
printf("\t--window_comp\t in seconds\n");
printf("\t--window_pause\t in seconds\n");
printf("\t--window_size_min\t in seconds\n");
printf("\t--window_size_max\t in seconds\n");
printf("\t--use_random\n");
printf("\t--use_multiple_cores\n");
printf("\t--rank_number\n");
printf("\t--use_ram\t in MB\n");
return 0;
}
}
if (use_random)
{
seed = (rank_number+1)*42;
srand(seed);
int random = rand();
printf("Seed = %d, random = %d\n", seed, random);
window_comp = random % (window_size_max-window_size_min) + window_size_min;
printf("New window comp set to: %d\n", window_comp);
}
if (window_comp <= 0)
{
printf("window_comp is negative or zero, program will terminate\n");
return 0;
}
int num_threads = use_multiple_cores;
#pragma omp parallel num_threads(use_multiple_cores)
{
printf("started thread: %d\n", omp_get_thread_num());
switch (d_mode)
{
case compute:
compute_kernel();
break;
case memory:
memory_kernel();
break;
case communication:
communication_kernel();
break;
default:
break;
}
}
//MPI_Finalize();
}
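/* Usage sketch for split() below (hypothetical input, mirroring the option
 * parsing above):
 * char **kv = split("--use_ram=512", '=', 2);
 * // kv[0] -> "--use_ram", kv[1] -> "512"
 */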
char **split(char *str, char div, int num_args)
{
char **result = (char**)malloc(sizeof(char*)*num_args); // was sizeof(char), too small for an array of pointers
int i;
for (i = 0; i < num_args; i++)
{
result[i] = (char*)calloc(strlen(str)+1, sizeof(char)); // +1 and zero-fill keep every token NUL-terminated
}
int j = 0;
int k = 0;
for(i = 0; i < strlen(str); i++)
{
if (str[i] == div)
{
j = j + 1;
k = 0;
if (j >= num_args) {
return result;
}
}
else
{
result[j][k] = str[i];
k = k + 1;
}
}
return result;
} |
3D.c | #include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <string.h>
#include <omp.h> // needed for omp_get_max_threads() used below
#define STR_SIZE (256)
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016; float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void fatal(char *s)
{
fprintf(stderr, "Error: %s\n", s);
exit(1); // treat input errors as fatal instead of falling through to further reads
}
void readinput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
int i,j,k;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
fatal( "The file was not opened" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
for (k=0; k <= layers-1; k++)
{
if (fgets(str, STR_SIZE, fp) == NULL) fatal("Error reading file\n");
if (feof(fp))
fatal("not enough lines in file");
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j+k*grid_rows*grid_cols] = val;
}
fclose(fp);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, int layers, char *file) {
int i,j,k, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 ) {
printf( "The file was not opened\n" );
return; // writing to a NULL FILE* would crash below
}
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
for (k=0; k < layers; k++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j+k*grid_rows*grid_cols]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void computeTempCPU(float *pIn, float* tIn, float *tOut,
int nx, int ny, int nz, float Cap,
float Rx, float Ry, float Rz,
float dt, int numiter)
{ float ce, cw, cn, cs, ct, cb, cc;
float stepDivCap = dt / Cap;
ce = cw =stepDivCap/ Rx;
cn = cs =stepDivCap/ Ry;
ct = cb =stepDivCap/ Rz;
cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);
int c,w,e,n,s,b,t;
int x,y,z;
int i = 0;
do{
for(z = 0; z < nz; z++)
for(y = 0; y < ny; y++)
for(x = 0; x < nx; x++)
{
c = x + y * nx + z * nx * ny;
w = (x == 0) ? c : c - 1;
e = (x == nx - 1) ? c : c + 1;
n = (y == 0) ? c : c - nx;
s = (y == ny - 1) ? c : c + nx;
b = (z == 0) ? c : c - nx * ny;
t = (z == nz - 1) ? c : c + nx * ny;
tOut[c] = tIn[c]*cc + tIn[n]*cn + tIn[s]*cs + tIn[e]*ce + tIn[w]*cw + tIn[t]*ct + tIn[b]*cb + (dt/Cap) * pIn[c] + ct*amb_temp;
}
float *temp = tIn;
tIn = tOut;
tOut = temp;
i++;
}
while(i < numiter);
}
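/* Note on computeTempCPU() above: clamping each neighbor index to the cell
itself at the grid faces (e.g. w = (x == 0) ? c : c - 1) implements
replicated/insulated boundary conditions without ghost layers. */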
float accuracy(float *arr1, float *arr2, int len)
{
float err = 0.0;
int i;
#pragma omp parallel for firstprivate(len ,arr2 ,arr1 ,i ) reduction(+:err)
for(i = 0; i < len; i++)
{
err += (arr1[i]-arr2[i]) * (arr1[i]-arr2[i]);
}
return (float)sqrt(err/len);
}
void computeTempOMP(float *pIn, float* tIn, float *tOut,
int nx, int ny, int nz, float Cap,
float Rx, float Ry, float Rz,
float dt, int numiter)
{
float ce, cw, cn, cs, ct, cb, cc;
float stepDivCap = dt / Cap;
ce = cw =stepDivCap/ Rx;
cn = cs =stepDivCap/ Ry;
ct = cb =stepDivCap/ Rz;
cc = 1.0 - (2.0*ce + 2.0*cn + 3.0*ct);
{
int count = 0;
float *tIn_t = tIn;
float *tOut_t = tOut;
printf("%d threads running\n", omp_get_num_threads());
do {
int z;
#pragma omp parallel for
for (z = 0; z < nz; z++) {
int y;
for (y = 0; y < ny; y++) {
int x;
for (x = 0; x < nx; x++) {
int c, w, e, n, s, b, t;
c = x + y * nx + z * nx * ny;
w = (x == 0) ? c : c - 1;
e = (x == nx-1) ? c : c + 1;
n = (y == 0) ? c : c - nx;
s = (y == ny-1) ? c : c + nx;
b = (z == 0) ? c : c - nx * ny;
t = (z == nz-1) ? c : c + nx * ny;
tOut_t[c] = cc * tIn_t[c] + cw * tIn_t[w] + ce * tIn_t[e]
+ cs * tIn_t[s] + cn * tIn_t[n] + cb * tIn_t[b] + ct * tIn_t[t]+(dt/Cap) * pIn[c] + ct*amb_temp;
}
}
}
float *t = tIn_t;
tIn_t = tOut_t;
tOut_t = t;
count++;
} while (count < numiter);
}
return;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <rows/cols> <layers> <iterations> <powerFile> <tempFile> <outputFile>\n", argv[0]);
fprintf(stderr, "\t<rows/cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<layers> - number of layers in the grid (positive integer)\n");
fprintf(stderr, "\t<iteration> - number of iterations\n");
fprintf(stderr, "\t<powerFile> - name of the file containing the initial power values of each cell\n");
fprintf(stderr, "\t<tempFile> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<outputFile - output file\n");
exit(1);
}
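/* Example invocation matching the usage text above (file names are
hypothetical): ./3D 512 8 100 power_512x8 temp_512x8 output.out */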
int main(int argc, char** argv)
{
if (argc != 7)
{
usage(argc,argv);
}
char *pfile, *tfile, *ofile;// *testFile;
int iterations = atoi(argv[3]);
pfile = argv[4];
tfile = argv[5];
ofile = argv[6];
//testFile = argv[7];
int numCols = atoi(argv[1]);
int numRows = atoi(argv[1]);
int layers = atoi(argv[2]);
/* calculating parameters*/
float dx = chip_height/numRows;
float dy = chip_width/numCols;
float dz = t_chip/layers;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * dx * dy;
float Rx = dy / (2.0 * K_SI * t_chip * dx);
float Ry = dx / (2.0 * K_SI * t_chip * dy);
float Rz = dz / (K_SI * dx * dy);
// cout << Rx << " " << Ry << " " << Rz << endl;
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float dt = PRECISION / max_slope;
float *powerIn, *tempOut, *tempIn, *tempCopy;// *pCopy;
// float *d_powerIn, *d_tempIn, *d_tempOut;
int size = numCols * numRows * layers;
powerIn = (float*)calloc(size, sizeof(float));
tempCopy = (float*)malloc(size * sizeof(float));
tempIn = (float*)calloc(size,sizeof(float));
tempOut = (float*)calloc(size, sizeof(float));
//pCopy = (float*)calloc(size,sizeof(float));
float* answer = (float*)calloc(size, sizeof(float));
// outCopy = (float*)calloc(size, sizeof(float));
readinput(powerIn,numRows, numCols, layers,pfile);
readinput(tempIn, numRows, numCols, layers, tfile);
memcpy(tempCopy,tempIn, size * sizeof(float));
struct timeval start, stop;
float time;
gettimeofday(&start,NULL);
computeTempOMP(powerIn, tempIn, tempOut, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,iterations);
gettimeofday(&stop,NULL);
time = (stop.tv_usec-start.tv_usec)*1.0e-6 + stop.tv_sec - start.tv_sec;
computeTempCPU(powerIn, tempCopy, answer, numCols, numRows, layers, Cap, Rx, Ry, Rz, dt,iterations);
float acc = accuracy(tempOut,answer,numRows*numCols*layers);
printf("Time: %.3f (s)\n",time);
printf("Accuracy: %e\n",acc);
writeoutput(tempOut,numRows, numCols, layers, ofile);
free(tempIn);
free(tempOut); free(powerIn);
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
} else {
fprintf(stderr, "Usage: %s Nx Ny Nz [Nt]\n", argv[0]); // sizes are required; previously Nx/Ny/Nz were used uninitialized
return 1;
}
Nt = 1; // default step count when not given (an assumption; previously Nt was used uninitialized)
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2; // allocated once below (the extra malloc that was here leaked)
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 128;
tile_size[4] = -1;
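// Reading of the tile sizes above: the CLooG-generated nest below blocks the
// time/z/y dimensions with tiles of 4 (the 4*t2, 4*t3, 4*t5 terms) and the
// contiguous x dimension with tiles of 128 (the 128*t4 terms).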
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0; // the second time level is read before it is first written
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) {
for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(4*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(4*t3+Nx-9,128));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
GB_unaryop__minv_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_fp64
// op(A') function: GB_tran__minv_int16_fp64
// C type: int16_t
// A type: double
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, aij) \
int16_t z ; GB_CAST_SIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
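// Illustrative expansion of the macros above: GB_CAST_OP (p, p) becomes
// { double aij = Ax [p] ;
// int16_t z ; GB_CAST_SIGNED (z, aij, 16) ;
// Cx [p] = GB_IMINV_SIGNED (z, 16) ; }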
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int16_fp64
(
int16_t *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int16_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ParticleBConds3DSoa.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H
#define QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H
#include <config.h>
#include <algorithm>
#include <Lattice/CrystalLattice.h>
namespace qmcplusplus
{
/** specialization for an open 3D
*/
template<class T>
struct DTD_BConds<T, 3, SUPERCELL_OPEN + SOA_OFFSET>
{
/** constructor: doing nothing */
inline DTD_BConds(const CrystalLattice<T, 3>& lat) {}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
dx[iat] = px[iat] - x0;
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
dx[iat] = px[iat] - x0;
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D, orthorhombic cell
*/
template<class T>
struct DTD_BConds<T, 3, PPPO + SOA_OFFSET>
{
T Linv0, L0, Linv1, L1, Linv2, L2, r2max, dummy;
inline DTD_BConds(const CrystalLattice<T, 3>& lat)
: Linv0(lat.OneOverLength[0]),
L0(lat.Length[0]),
Linv1(lat.OneOverLength[1]),
L1(lat.Length[1]),
Linv2(lat.OneOverLength[2]),
L2(lat.Length[2]),
r2max(lat.CellRadiusSq),
dummy(T())
{}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T x = (px[iat] - x0) * Linv0;
const T y = (py[iat] - y0) * Linv1;
const T z = (pz[iat] - z0) * Linv2;
dx[iat] = L0 * (x - round(x));
dy[iat] = L1 * (y - round(y));
dz[iat] = L2 * (z - round(z));
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
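// Note on the loop above: x - round(x) maps each fractional separation into
// [-0.5, 0.5), so dx/dy/dz hold the minimum-image displacement after scaling
// back by L0/L1/L2.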
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T x = (px[iat] - x0) * Linv0;
const T y = (py[iat] - y0) * Linv1;
const T z = (pz[iat] - z0) * Linv2;
dx[iat] = L0 * (x - round(x));
dy[iat] = L1 * (y - round(y));
dz[iat] = L2 * (z - round(z));
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D general cell with wigner-seitz==simulation cell
*
* Skip image cells.
*/
template<class T>
struct DTD_BConds<T, 3, PPPS + SOA_OFFSET>
{
T r00, r10, r20, r01, r11, r21, r02, r12, r22;
T g00, g10, g20, g01, g11, g21, g02, g12, g22;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r20(lat.R(6)),
r01(lat.R(1)),
r11(lat.R(4)),
r21(lat.R(7)),
r02(lat.R(2)),
r12(lat.R(5)),
r22(lat.R(8)),
g00(lat.G(0)),
g10(lat.G(3)),
g20(lat.G(6)),
g01(lat.G(1)),
g11(lat.G(4)),
g21(lat.G(7)),
g02(lat.G(2)),
g12(lat.G(5)),
g22(lat.G(8))
{}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T displ_2 = pz[iat] - z0;
T ar_0 = displ_0 * g00 + displ_1 * g10 + displ_2 * g20;
T ar_1 = displ_0 * g01 + displ_1 * g11 + displ_2 * g21;
T ar_2 = displ_0 * g02 + displ_1 * g12 + displ_2 * g22;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
ar_2 -= round(ar_2);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
dy[iat] = ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
dz[iat] = ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T displ_2 = pz[iat] - z0;
T ar_0 = displ_0 * g00 + displ_1 * g10 + displ_2 * g20;
T ar_1 = displ_0 * g01 + displ_1 * g11 + displ_2 * g21;
T ar_2 = displ_0 * g02 + displ_1 * g12 + displ_2 * g22;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
ar_2 -= round(ar_2);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
dy[iat] = ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
dz[iat] = ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D general cell
*
* Wigner-Seitz cell radius > simulation cell radius
* Need to check image cells
*/
template<class T>
struct DTD_BConds<T, 3, PPPG + SOA_OFFSET>
{
T g00, g10, g20, g01, g11, g21, g02, g12, g22;
T r00, r10, r20, r01, r11, r21, r02, r12, r22;
TinyVector<TinyVector<T, 8>, 3> corners;
DTD_BConds(const CrystalLattice<T, 3>& lat)
{
TinyVector<TinyVector<T, 3>, 3> rb;
rb[0] = lat.a(0);
rb[1] = lat.a(1);
rb[2] = lat.a(2);
find_reduced_basis(rb);
r00 = rb[0][0];
r10 = rb[1][0];
r20 = rb[2][0];
r01 = rb[0][1];
r11 = rb[1][1];
r21 = rb[2][1];
r02 = rb[0][2];
r12 = rb[1][2];
r22 = rb[2][2];
Tensor<T, 3> rbt;
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j)
rbt(i, j) = rb[i][j];
Tensor<T, 3> g = inverse(rbt);
g00 = g(0);
g10 = g(3);
g20 = g(6);
g01 = g(1);
g11 = g(4);
g21 = g(7);
g02 = g(2);
g12 = g(5);
g22 = g(8);
constexpr T minusone(-1);
constexpr T zero(0);
for (int idim = 0; idim < 3; idim++)
{
auto& corners_dim = corners[idim];
corners_dim[0] = zero;
corners_dim[1] = minusone * (rb[0][idim]);
corners_dim[2] = minusone * (rb[1][idim]);
corners_dim[3] = minusone * (rb[2][idim]);
corners_dim[4] = minusone * (rb[0][idim] + rb[1][idim]);
corners_dim[5] = minusone * (rb[0][idim] + rb[2][idim]);
corners_dim[6] = minusone * (rb[1][idim] + rb[2][idim]);
corners_dim[7] = minusone * (rb[0][idim] + rb[1][idim] + rb[2][idim]);
}
}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
const auto& cellx = corners[0];
const auto& celly = corners[1];
const auto& cellz = corners[2];
constexpr T minusone(-1);
constexpr T one(1);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T displ_2 = (pz[iat] - z0) * flip;
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10 + displ_2 * g20);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11 + displ_2 * g21);
const T ar_2 = -std::floor(displ_0 * g02 + displ_1 * g12 + displ_2 * g22);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
const T delz = displ_2 + ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
T rmin = delx * delx + dely * dely + delz * delz;
int ic = 0;
#pragma unroll(7)
for (int c = 1; c < 8; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T z = delz + cellz[c];
const T r2 = x * x + y * y + z * z;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = flip * (delz + cellz[ic]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
const auto& cellx = corners[0];
const auto& celly = corners[1];
const auto& cellz = corners[2];
constexpr T minusone(-1);
constexpr T one(1);
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T displ_2 = (pz[iat] - z0) * flip;
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10 + displ_2 * g20);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11 + displ_2 * g21);
const T ar_2 = -std::floor(displ_0 * g02 + displ_1 * g12 + displ_2 * g22);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
const T delz = displ_2 + ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
T rmin = delx * delx + dely * dely + delz * delz;
int ic = 0;
#pragma unroll(7)
for (int c = 1; c < 8; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T z = delz + cellz[c];
const T r2 = x * x + y * y + z * z;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = flip * (delz + cellz[ic]);
}
}
};
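/* Editor's note: why the loop above tests exactly eight candidates. After the
floor() step the displacement lies in the home parallelepiped spanned by the
reduced basis vectors a0, a1, a2, but in a general (triclinic) cell the true
nearest image may sit in a cell reached by subtracting any subset of
{a0, a1, a2}; the corners table caches those 2^3 = 8 shifts. A standalone
sketch (not QMCPACK code) producing the same set of shifts, in a different
order: */
#if 0
/* shift[c] = minus the sum of the basis vectors selected by the bits of c */
static void make_shifts(const double a[3][3], double shift[8][3])
{
  for (int c = 0; c < 8; ++c)
    for (int d = 0; d < 3; ++d)
      shift[c][d] = -(((c >> 0) & 1) * a[0][d] +
                      ((c >> 1) & 1) * a[1][d] +
                      ((c >> 2) & 1) * a[2][d]);
}
#endif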
/** specialization for a slab, general cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNG + SOA_OFFSET>
{
T g00, g10, g01, g11;
T r00, r10, r01, r11;
TinyVector<TinyVector<T, 3>, 3> rb;
TinyVector<TinyVector<T, 4>, 2> corners;
DTD_BConds(const CrystalLattice<T, 3>& lat)
{
rb[0] = lat.a(0);
rb[1] = lat.a(1);
rb[2] = lat.a(2); //rb[2]=0.0;
r00 = rb[0][0];
r10 = rb[1][0];
r01 = rb[0][1];
r11 = rb[1][1];
g00 = lat.G(0);
g10 = lat.G(3);
g01 = lat.G(1);
g11 = lat.G(4);
T minusone = -1.0;
for (int idim = 0; idim < 2; idim++)
{
auto& corners_dim = corners[idim];
corners_dim[0] = T(0);
corners_dim[1] = minusone * (rb[0][idim]);
corners_dim[2] = minusone * (rb[1][idim]);
corners_dim[3] = minusone * (rb[0][idim] + rb[1][idim]);
}
}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
const auto& cellx = corners[0];
const auto& celly = corners[1];
constexpr T minusone(-1);
constexpr T one(1);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T delz = pz[iat] - z0;
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11;
T rmin = delx * delx + dely * dely;
int ic = 0;
#pragma unroll(3)
for (int c = 1; c < 4; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T r2 = x * x + y * y;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin + delz * delz);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = delz;
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
const auto& cellx = corners[0];
const auto& celly = corners[1];
constexpr T minusone(-1);
constexpr T one(1);
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T delz = pz[iat] - z0;
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11;
T rmin = delx * delx + dely * dely;
int ic = 0;
#pragma unroll(3)
for (int c = 1; c < 4; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T r2 = x * x + y * y;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin + delz * delz);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = delz;
}
}
};
/** specialization for a slab, orthorhombic cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNO + SOA_OFFSET>
{
T Linv0, L0, Linv1, L1;
inline DTD_BConds(const CrystalLattice<T, 3>& lat)
: Linv0(lat.OneOverLength[0]), L0(lat.Length[0]), Linv1(lat.OneOverLength[1]), L1(lat.Length[1])
{}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
T y = (py[iat] - y0) * Linv1;
dy[iat] = L1 * (y - round(y));
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
T y = (py[iat] - y0) * Linv1;
dy[iat] = L1 * (y - round(y));
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a slab, general cell with Wigner-Seitz cell == simulation cell
 *
 * Skip image cells.
 */
template<class T>
struct DTD_BConds<T, 3, PPNS + SOA_OFFSET>
{
T r00, r10, r01, r11;
T g00, g10, g01, g11;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r01(lat.R(1)),
r11(lat.R(4)),
g00(lat.G(0)),
g10(lat.G(3)),
g01(lat.G(1)),
g11(lat.G(4))
{}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T ar_0 = displ_0 * g00 + displ_1 * g10;
T ar_1 = displ_0 * g01 + displ_1 * g11;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10;
dy[iat] = ar_0 * r01 + ar_1 * r11;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T ar_0 = displ_0 * g00 + displ_1 * g10;
T ar_1 = displ_0 * g01 + displ_1 * g11;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10;
dy[iat] = ar_0 * r01 + ar_1 * r11;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a wire
*/
template<class T>
struct DTD_BConds<T, 3, SUPERCELL_WIRE + SOA_OFFSET>
{
T Linv0, L0;
inline DTD_BConds(const CrystalLattice<T, 3>& lat) : Linv0(lat.OneOverLength[0]), L0(lat.Length[0]) {}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
#pragma omp parallel for simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D general cell
 *
 * Slow method; not used unless one needs to check whether the faster methods fail.
 */
template<class T>
struct DTD_BConds<T, 3, PPPX + SOA_OFFSET>
{
T r00, r10, r20, r01, r11, r21, r02, r12, r22;
T g00, g10, g20, g01, g11, g21, g02, g12, g22;
T r2max;
TinyVector<TinyVector<T, 26>, 3> nextcells;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r20(lat.R(6)),
r01(lat.R(1)),
r11(lat.R(4)),
r21(lat.R(7)),
r02(lat.R(2)),
r12(lat.R(5)),
r22(lat.R(8)),
g00(lat.G(0)),
g10(lat.G(3)),
g20(lat.G(6)),
g01(lat.G(1)),
g11(lat.G(4)),
g21(lat.G(7)),
g02(lat.G(2)),
g12(lat.G(5)),
g22(lat.G(8)),
r2max(lat.CellRadiusSq)
{
auto& cellx = nextcells[0]; // non-const: assigned in the loop below
auto& celly = nextcells[1];
auto& cellz = nextcells[2];
int ic = 0;
for (int i = -1; i <= 1; ++i)
for (int j = -1; j <= 1; ++j)
for (int k = -1; k <= 1; ++k)
{
if (i == 0 && j == 0 && k == 0)
continue; //exclude zero
cellx[ic] = i * r00 + j * r10 + k * r20;
celly[ic] = i * r01 + j * r11 + k * r21;
cellz[ic] = i * r02 + j * r12 + k * r22;
++ic;
}
}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
APP_ABORT("DTD_BConds<T,3,PPPX> not implemented");
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
//APP_ABORT("DTD_BConds<T, 3, PPPX + SOA_OFFSET>::computeDistancesOffload not implemented");
}
};
/** specialization for a slab, general cell
 *
 * Slow exhaustive method analogous to PPPX; the compute methods are not implemented.
 */
template<class T>
struct DTD_BConds<T, 3, PPNX + SOA_OFFSET>
{
T r00, r10, r01, r11;
T g00, g10, g01, g11;
T r2max;
TinyVector<TinyVector<T, 8>, 3> nextcells;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r01(lat.R(1)),
r11(lat.R(4)),
g00(lat.G(0)),
g10(lat.G(3)),
g01(lat.G(1)),
g11(lat.G(4)),
r2max(lat.CellRadiusSq)
{
auto& cellx = nextcells[0]; // non-const: assigned in the loop below
auto& celly = nextcells[1];
auto& cellz = nextcells[2];
int ic = 0;
for (int i = -1; i <= 1; ++i)
for (int j = -1; j <= 1; ++j)
{
if (i == 0 && j == 0)
continue; //exclude zero
cellx[ic] = i * r00 + j * r10;
celly[ic] = i * r01 + j * r11;
cellz[ic] = T();
++ic;
}
}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
APP_ABORT("DTD_BConds<T,3,PPNX> not implemented");
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int first,
int last,
int flip_ind = 0)
{
//APP_ABORT("DTD_BConds<T, 3, PPNX + SOA_OFFSET>::computeDistancesOffload not implemented");
}
};
} // namespace qmcplusplus
#endif // OHMMS_PARTICLE_BCONDS_3D_H
|
GB_Global.c | //------------------------------------------------------------------------------
// GB_Global: global values in GraphBLAS
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All Global storage is declared, initialized, and accessed here. The
// contents of the GB_Global struct are only accessible to functions in this
// file. Global storage is used to keep track of the GraphBLAS mode (blocking
// or non-blocking), for pointers to malloc/calloc/realloc/free functions,
// global matrix options, and other settings.
#include "GB_atomics.h"
//------------------------------------------------------------------------------
// Global storage: for all threads in a user application that uses GraphBLAS
//------------------------------------------------------------------------------
typedef struct
{
void *queue_head ; // TODO in 4.0: delete
//--------------------------------------------------------------------------
// blocking/non-blocking mode, set by GrB_init
//--------------------------------------------------------------------------
GrB_Mode mode ; // GrB_NONBLOCKING or GrB_BLOCKING
bool GrB_init_called ; // true if GrB_init already called
//--------------------------------------------------------------------------
// threading and MKL control
//--------------------------------------------------------------------------
bool use_mkl ; // control usage of Intel MKL
int nthreads_max ; // max number of threads to use
double chunk ; // chunk size for determining # threads to use
//--------------------------------------------------------------------------
// hypersparsity and CSR/CSC format control
//--------------------------------------------------------------------------
double hyper_ratio ; // default hyper_ratio for new matrices
bool is_csc ; // default CSR/CSC format for new matrices
//--------------------------------------------------------------------------
// abort function: only used for debugging
//--------------------------------------------------------------------------
void (* abort_function ) (void) ;
//--------------------------------------------------------------------------
// malloc/calloc/realloc/free: memory management functions
//--------------------------------------------------------------------------
// All threads must use the same malloc/calloc/realloc/free functions.
// They default to the ANSI C11 functions, but can be defined by GxB_init.
void * (* malloc_function ) (size_t) ;
void * (* calloc_function ) (size_t, size_t) ;
void * (* realloc_function ) (void *, size_t) ;
void (* free_function ) (void *) ;
bool malloc_is_thread_safe ; // default is true
//--------------------------------------------------------------------------
// memory usage tracking: for testing and debugging only
//--------------------------------------------------------------------------
// malloc_tracking: default is false. There is no user-accessible API for
// setting this to true. If true, the following statistics are computed.
// If false, all of the following are unused.
// nmalloc: To aid in searching for memory leaks, GraphBLAS keeps track of
// the number of blocks of allocated memory that have not yet been freed. The
// count starts at zero. GB_malloc_memory and GB_calloc_memory increment
// this count, and free (of a non-NULL pointer) decrements it. realloc
// increments the count if it is allocating a new block, but it does this
// by calling GB_malloc_memory.
// malloc_debug: this is used for testing only (GraphBLAS/Tcov). If true,
// then use malloc_debug_count for testing memory allocation and
// out-of-memory conditions. If malloc_debug_count > 0, the value is
// decremented after each allocation of memory. If malloc_debug_count <=
// 0, the GB_*_memory routines pretend to fail, returning NULL and not
// allocating anything.
bool malloc_tracking ; // true if allocations are being tracked
int64_t nmalloc ; // number of blocks allocated but not freed
bool malloc_debug ; // if true, test memory handling
int64_t malloc_debug_count ; // for testing memory handling
//--------------------------------------------------------------------------
// for testing and development
//--------------------------------------------------------------------------
int64_t hack ; // ad hoc setting (for draft versions only)
bool burble ; // controls GBBURBLE output
//--------------------------------------------------------------------------
// for MATLAB interface only
//--------------------------------------------------------------------------
bool print_one_based ; // if true, print 1-based indices
//--------------------------------------------------------------------------
// CUDA (DRAFT: in progress)
//--------------------------------------------------------------------------
int gpu_count ; // # of GPUs in the system
GrB_Desc_Value gpu_control ; // always, never, or default
double gpu_chunk ; // min problem size for using a GPU
// properties of each GPU:
GB_cuda_device gpu_properties [GB_CUDA_MAX_GPUS] ;
}
GB_Global_struct ;
GB_PUBLIC GB_Global_struct GB_Global ;
GB_Global_struct GB_Global =
{
.queue_head = NULL, // TODO in 4.0: delete
// GraphBLAS mode
.mode = GrB_NONBLOCKING, // default is nonblocking
// initialization flag
.GrB_init_called = false, // GrB_init has not yet been called
// Intel MKL control (DRAFT: in progress)
.use_mkl = false, // if true, exploit the Intel MKL
// max number of threads and chunk size
.nthreads_max = 1,
.chunk = GB_CHUNK_DEFAULT,
// default format
.hyper_ratio = GB_HYPER_DEFAULT,
.is_csc = (GB_FORMAT_DEFAULT != GxB_BY_ROW), // default is GxB_BY_ROW
// abort function for debugging only
.abort_function = abort,
// malloc/calloc/realloc/free functions: default to ANSI C11 functions
.malloc_function = malloc,
.calloc_function = calloc,
.realloc_function = realloc,
.free_function = free,
.malloc_is_thread_safe = true,
// malloc tracking, for testing, statistics, and debugging only
.malloc_tracking = false,
.nmalloc = 0, // memory block counter
.malloc_debug = false, // do not test memory handling
.malloc_debug_count = 0, // counter for testing memory handling
// for testing and development only
.hack = 0,
// diagnostics
.burble = false,
// for MATLAB interface only
.print_one_based = false, // if true, print 1-based indices
// CUDA environment (DRAFT: in progress)
.gpu_count = 0, // # of GPUs in the system
.gpu_control = GxB_DEFAULT, // always, never, or default
.gpu_chunk = GB_GPU_CHUNK_DEFAULT // min problem size for using a GPU
} ;
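/* Editor's note: user code normally sets the allocator pointers above through
GxB_init rather than by touching GB_Global directly. A hedged sketch only: the
exact GxB_init prototype should be checked against GraphBLAS.h for the version
in use, and mxMalloc/mxCalloc/mxRealloc/mxFree are MATLAB's allocators from
mex.h. */
#if 0
#include "GraphBLAS.h"
#include "mex.h"
void demo_init (void)
{
    /* route all GraphBLAS allocations through MATLAB's memory manager;
       the final argument states whether those functions are thread-safe */
    GrB_Info info = GxB_init (GrB_NONBLOCKING,
        mxMalloc, mxCalloc, mxRealloc, mxFree, false) ;
    (void) info ;
}
#endif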
//==============================================================================
// GB_Global access functions
//==============================================================================
// TODO in 4.0: delete:
GB_PUBLIC
void GB_Global_queue_head_set (void *p) { GB_Global.queue_head = p ; }
GB_PUBLIC
void *GB_Global_queue_head_get (void) { return (GB_Global.queue_head) ; }
//------------------------------------------------------------------------------
// mode
//------------------------------------------------------------------------------
void GB_Global_mode_set (GrB_Mode mode)
{
GB_Global.mode = mode ;
}
GrB_Mode GB_Global_mode_get (void)
{
return (GB_Global.mode) ;
}
//------------------------------------------------------------------------------
// GrB_init_called
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_GrB_init_called_set (bool GrB_init_called)
{
GB_Global.GrB_init_called = GrB_init_called ;
}
GB_PUBLIC // accessed by the MATLAB interface only
bool GB_Global_GrB_init_called_get (void)
{
return (GB_Global.GrB_init_called) ;
}
//------------------------------------------------------------------------------
// nthreads_max
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_nthreads_max_set (int nthreads_max)
{
GB_Global.nthreads_max = GB_IMAX (nthreads_max, 1) ;
}
GB_PUBLIC // accessed by the MATLAB interface only
int GB_Global_nthreads_max_get (void)
{
return (GB_Global.nthreads_max) ;
}
//------------------------------------------------------------------------------
// OpenMP max_threads
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
int GB_Global_omp_get_max_threads (void)
{
return (GB_OPENMP_MAX_THREADS) ;
}
//------------------------------------------------------------------------------
// chunk
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_chunk_set (double chunk)
{
if (chunk <= GxB_DEFAULT) chunk = GB_CHUNK_DEFAULT ;
GB_Global.chunk = fmax (chunk, 1) ;
}
GB_PUBLIC // accessed by the MATLAB interface only
double GB_Global_chunk_get (void)
{
return (GB_Global.chunk) ;
}
//------------------------------------------------------------------------------
// hyper_ratio
//------------------------------------------------------------------------------
void GB_Global_hyper_ratio_set (double hyper_ratio)
{
GB_Global.hyper_ratio = hyper_ratio ;
}
double GB_Global_hyper_ratio_get (void)
{
return (GB_Global.hyper_ratio) ;
}
//------------------------------------------------------------------------------
// use_mkl
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_use_mkl_set (bool use_mkl)
{
GB_Global.use_mkl = use_mkl ;
}
GB_PUBLIC // accessed by the MATLAB interface only
bool GB_Global_use_mkl_get (void)
{
return (GB_Global.use_mkl) ;
}
//------------------------------------------------------------------------------
// is_csc
//------------------------------------------------------------------------------
void GB_Global_is_csc_set (bool is_csc)
{
GB_Global.is_csc = is_csc ;
}
bool GB_Global_is_csc_get (void)
{
return (GB_Global.is_csc) ;
}
//------------------------------------------------------------------------------
// abort_function
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_abort_function_set (void (* abort_function) (void))
{
GB_Global.abort_function = abort_function ;
}
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_abort_function (void)
{
GB_Global.abort_function ( ) ;
}
//------------------------------------------------------------------------------
// malloc_function
//------------------------------------------------------------------------------
void GB_Global_malloc_function_set (void * (* malloc_function) (size_t))
{
GB_Global.malloc_function = malloc_function ;
}
void * GB_Global_malloc_function (size_t size)
{
bool ok = true ;
void *p = NULL ;
if (GB_Global.malloc_is_thread_safe)
{
p = GB_Global.malloc_function (size) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
p = GB_Global.malloc_function (size) ;
}
}
return (ok ? p : NULL) ;
}
//------------------------------------------------------------------------------
// calloc_function
//------------------------------------------------------------------------------
void GB_Global_calloc_function_set (void * (* calloc_function) (size_t, size_t))
{
GB_Global.calloc_function = calloc_function ;
}
void * GB_Global_calloc_function (size_t count, size_t size)
{
bool ok = true ;
void *p = NULL ;
if (GB_Global.malloc_is_thread_safe)
{
p = GB_Global.calloc_function (count, size) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
p = GB_Global.calloc_function (count, size) ;
}
}
return (ok ? p : NULL) ;
}
//------------------------------------------------------------------------------
// realloc_function
//------------------------------------------------------------------------------
void GB_Global_realloc_function_set
(
void * (* realloc_function) (void *, size_t)
)
{
GB_Global.realloc_function = realloc_function ;
}
bool GB_Global_have_realloc_function (void)
{
return (GB_Global.realloc_function != NULL) ;
}
void * GB_Global_realloc_function (void *p, size_t size)
{
bool ok = true ;
void *pnew = NULL ;
if (GB_Global.malloc_is_thread_safe)
{
pnew = GB_Global.realloc_function (p, size) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
pnew = GB_Global.realloc_function (p, size) ;
}
}
return (ok ? pnew : NULL) ;
}
//------------------------------------------------------------------------------
// free_function
//------------------------------------------------------------------------------
void GB_Global_free_function_set (void (* free_function) (void *))
{
GB_Global.free_function = free_function ;
}
void GB_Global_free_function (void *p)
{
if (GB_Global.malloc_is_thread_safe)
{
GB_Global.free_function (p) ;
}
else
{
#pragma omp critical(GB_malloc_protection)
{
GB_Global.free_function (p) ;
}
}
}
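/* Editor's note: OpenMP critical sections that share a name share a single
global lock, so the malloc/calloc/realloc/free wrappers above all serialize
against one another, not merely against themselves. A minimal standalone
illustration (not GraphBLAS code): */
#if 0
void touch_a (int *a)
{
    #pragma omp critical(shared_lock)
    { (*a)++ ; }    /* same lock as touch_b */
}
void touch_b (int *b)
{
    #pragma omp critical(shared_lock)
    { (*b)-- ; }    /* never runs concurrently with touch_a */
}
#endif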
//------------------------------------------------------------------------------
// malloc_is_thread_safe
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_Global_malloc_is_thread_safe_set (bool malloc_is_thread_safe)
{
GB_Global.malloc_is_thread_safe = malloc_is_thread_safe ;
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_Global_malloc_is_thread_safe_get (void)
{
return (GB_Global.malloc_is_thread_safe) ;
}
//------------------------------------------------------------------------------
// malloc_tracking
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_Global_malloc_tracking_set (bool malloc_tracking)
{
GB_Global.malloc_tracking = malloc_tracking ;
}
bool GB_Global_malloc_tracking_get (void)
{
return (GB_Global.malloc_tracking) ;
}
//------------------------------------------------------------------------------
// nmalloc
//------------------------------------------------------------------------------
void GB_Global_nmalloc_clear (void)
{
GB_ATOMIC_WRITE
GB_Global.nmalloc = 0 ;
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
int64_t GB_Global_nmalloc_get (void)
{
int64_t nmalloc ;
GB_ATOMIC_READ
nmalloc = GB_Global.nmalloc ;
return (nmalloc) ;
}
void GB_Global_nmalloc_increment (void)
{
GB_ATOMIC_UPDATE
GB_Global.nmalloc++ ;
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_Global_nmalloc_decrement (void)
{
GB_ATOMIC_UPDATE
GB_Global.nmalloc-- ;
}
//------------------------------------------------------------------------------
// malloc_debug
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_Global_malloc_debug_set (bool malloc_debug)
{
GB_ATOMIC_WRITE
GB_Global.malloc_debug = malloc_debug ;
}
bool GB_Global_malloc_debug_get (void)
{
bool malloc_debug ;
GB_ATOMIC_READ
malloc_debug = GB_Global.malloc_debug ;
return (malloc_debug) ;
}
//------------------------------------------------------------------------------
// malloc_debug_count
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_Global_malloc_debug_count_set (int64_t malloc_debug_count)
{
GB_ATOMIC_WRITE
GB_Global.malloc_debug_count = malloc_debug_count ;
}
bool GB_Global_malloc_debug_count_decrement (void)
{
GB_ATOMIC_UPDATE
GB_Global.malloc_debug_count-- ;
int64_t malloc_debug_count ;
GB_ATOMIC_READ
malloc_debug_count = GB_Global.malloc_debug_count ;
return (malloc_debug_count <= 0) ;
}
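/* Editor's note: a hedged sketch of how an allocation wrapper can consume the
malloc_debug machinery above to inject out-of-memory failures during testing.
The real consumer is GB_malloc_memory (defined elsewhere); test_malloc below
is illustrative only. */
#if 0
#include <stddef.h>
void *test_malloc (size_t size)
{
    if (GB_Global_malloc_debug_get ( ) &&
        GB_Global_malloc_debug_count_decrement ( ))
    {
        return (NULL) ;     /* pretend the allocation failed */
    }
    return (GB_Global_malloc_function (size)) ;
}
#endif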
//------------------------------------------------------------------------------
// hack: for setting an internal value for development only
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_Global_hack_set (int64_t hack)
{
GB_Global.hack = hack ;
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
int64_t GB_Global_hack_get (void)
{
return (GB_Global.hack) ;
}
//------------------------------------------------------------------------------
// burble: for controlling the burble output
//------------------------------------------------------------------------------
void GB_Global_burble_set (bool burble)
{
GB_Global.burble = burble ;
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
bool GB_Global_burble_get (void)
{
return (GB_Global.burble) ;
}
//------------------------------------------------------------------------------
// for MATLAB interface only
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB interface only
void GB_Global_print_one_based_set (bool onebased)
{
GB_Global.print_one_based = onebased ;
}
GB_PUBLIC // accessed by the MATLAB interface only
bool GB_Global_print_one_based_get (void)
{
return (GB_Global.print_one_based) ;
}
//------------------------------------------------------------------------------
// CUDA (DRAFT: in progress)
//------------------------------------------------------------------------------
void GB_Global_gpu_control_set (GrB_Desc_Value gpu_control)
{
// set the GPU control to always, never, or default
if (GB_Global.gpu_count > 0)
{
// one or more GPUs are available: set gpu_control to
// always, never, or default.
if (gpu_control == GxB_GPU_ALWAYS || gpu_control == GxB_GPU_NEVER)
{
GB_Global.gpu_control = gpu_control ;
}
else
{
GB_Global.gpu_control = GxB_DEFAULT ;
}
}
else
{
// no GPUs available: never use a GPU
GB_Global.gpu_control = GxB_GPU_NEVER ;
}
}
GrB_Desc_Value GB_Global_gpu_control_get (void)
{
// get the GPU control parameter
return (GB_Global.gpu_control) ;
}
void GB_Global_gpu_chunk_set (double gpu_chunk)
{
// set the GPU chunk factor
if (gpu_chunk < 1) gpu_chunk = GB_GPU_CHUNK_DEFAULT ;
GB_Global.gpu_chunk = gpu_chunk ;
}
double GB_Global_gpu_chunk_get (void)
{
// get the GPU chunk factor
return (GB_Global.gpu_chunk) ;
}
bool GB_Global_gpu_count_set (bool enable_cuda)
{
// set the # of GPUs in the system;
// this function is only called once, by GB_init.
#if defined ( GBCUDA )
if (enable_cuda)
{
return (GB_cuda_get_device_count (&GB_Global.gpu_count)) ;
}
else
#endif
{
// no GPUs available, or available but not requested
GB_Global.gpu_count = 0 ;
return (true) ;
}
}
int GB_Global_gpu_count_get (void)
{
// get the # of GPUs in the system
return (GB_Global.gpu_count) ;
}
#define GB_GPU_DEVICE_CHECK(error) \
if (device < 0 || device >= GB_Global.gpu_count) return (error) ;
size_t GB_Global_gpu_memorysize_get (int device)
{
// get the memory size of a specific GPU
GB_GPU_DEVICE_CHECK (0) ; // memory size zero if invalid GPU
return (GB_Global.gpu_properties [device].total_global_memory) ;
}
int GB_Global_gpu_sm_get (int device)
{
// get the # of SMs in a specific GPU
GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU
return (GB_Global.gpu_properties [device].number_of_sms) ;
}
bool GB_Global_gpu_device_properties_get (int device)
{
// get all properties of a specific GPU;
// this function is only called once per GPU, by GB_init.
GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU
#if defined ( GBCUDA )
return (GB_cuda_get_device_properties (device,
&(GB_Global.gpu_properties [device]))) ;
#else
// if no GPUs exist, they cannot be queried
return (false) ;
#endif
}
|
nco_ply_lst.c | /* $Header$ */
/* Purpose: Functions that manipulate lists of polygons */
/* Copyright (C) 2018--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License with exceptions described in the LICENSE file */
#include "nco_ply_lst.h"
void
nco_poly_re_org_lst( /* for each poly_sct* in list re-order points so that the first point is the leftmost point */
poly_sct **pl_lst,
int arr_nbr)
{
int idx=0;
int jdx=0;
int max_crn_nbr=0;
double *lcl_dp_x;
double *lcl_dp_y;
/* max crn_nbr */
for(idx=0 ; idx<arr_nbr ;idx++)
if( pl_lst[idx]->crn_nbr > max_crn_nbr )
max_crn_nbr = pl_lst[idx]->crn_nbr;
lcl_dp_x=(double*)nco_calloc(max_crn_nbr, sizeof(double));
lcl_dp_y=(double*)nco_calloc(max_crn_nbr, sizeof(double));
for(idx=0; idx<arr_nbr; idx++)
{
int lcl_min=0;
int crn_nbr=pl_lst[idx]->crn_nbr;
double x_min=1.0e30; /* large sentinel so the first comparison always succeeds */
/* de-reference */
poly_sct *pl=pl_lst[idx];
/* find index of min X value */
for(jdx=0; jdx<crn_nbr; jdx++)
if( pl->dp_x[jdx] < x_min )
{ x_min=pl->dp_x[jdx]; lcl_min=jdx;}
/* first point already x_min so do nothing */
if( lcl_min == 0)
continue;
for(jdx=0; jdx<crn_nbr; jdx++)
{
lcl_dp_x[jdx]=pl->dp_x[(jdx+lcl_min)%crn_nbr];
lcl_dp_y[jdx]=pl->dp_y[(jdx+lcl_min)%crn_nbr];
}
/* copy over values */
memcpy(pl->dp_x, lcl_dp_x, (size_t)crn_nbr*sizeof(double));
memcpy(pl->dp_y, lcl_dp_y, (size_t)crn_nbr*sizeof(double));
}
lcl_dp_x=(double*)nco_free(lcl_dp_x);
lcl_dp_y=(double*)nco_free(lcl_dp_y);
return;
}
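/* Editor's note: the (jdx+lcl_min)%crn_nbr copy above is a plain left rotation
that makes vertex lcl_min the new first vertex while preserving winding order.
Standalone illustration (not NCO code): rotating {7,3,9,5} by lcl_min=1 yields
{3,9,5,7}. */
#if 0
#include <stdio.h>
int main(void)
{
  const double in[4]={7,3,9,5};
  double out[4];
  const int n=4, lcl_min=1;
  for(int j=0;j<n;j++) out[j]=in[(j+lcl_min)%n];
  for(int j=0;j<n;j++) printf("%g ",out[j]); /* 3 9 5 7 */
  return 0;
}
#endif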
poly_sct ** /* [O] [nbr] size of array */
nco_poly_lst_mk(
double *area, /* I [sr] Area of source grid */
int *msk, /* I [flg] Mask on source grid */
double *lat_ctr, /* I [dgr] Latitude centers of source grid */
double *lon_ctr, /* I [dgr] Longitude centers of source grid */
double *lat_crn, /* I [dgr] Latitude corners of source grid */
double *lon_crn, /* I [dgr] Longitude corners of source grid */
size_t grd_sz, /* I [nbr] Number of elements in single layer of source grid */
long grd_crn_nbr, /* I [nbr] Maximum number of corners in source gridcell */
nco_grd_lon_typ_enm grd_lon_typ, /* I [num] if not nil then split cells that straddle Greenwich or Dateline */
poly_typ_enm pl_typ,
int *pl_nbr)
{
const char fnc_nm[]="nco_poly_lst_mk()";
int idx=0;
int idx_cnt=0;
int cnt_wrp_good=0;
nco_bool bwrp;
double *lat_ptr=lat_crn;
double *lon_ptr=lon_crn;
/* buffers used in nco_poly_re_org() */
double lcl_dp_x[VP_MAX]={0};
double lcl_dp_y[VP_MAX]={0};
poly_sct *pl;
poly_sct *pl_wrp_left;
poly_sct *pl_wrp_right;
poly_sct **pl_lst;
/* start with twice the grid size as we may be splitting the cells along the Greenwich meridian or dateline */
/* realloc at the end */
pl_lst=(poly_sct**)nco_malloc( sizeof (poly_sct*) * grd_sz *2 );
// printf("About to print poly sct grd_sz=%d grd_crn_nbr=%d\n", grd_sz, grd_crn_nbr);
for(idx=0;idx<grd_sz; idx++)
{
/* check mask and area */
if( msk[idx]==0 || area[idx] == 0.0)
continue;
pl=nco_poly_init_lst(pl_typ, grd_crn_nbr,0, idx, lon_ptr, lat_ptr);
lon_ptr+=(size_t)grd_crn_nbr;
lat_ptr+=(size_t)grd_crn_nbr;
/* if poly is less than a triangle then null is returned*/
if(!pl)
continue;
/* add min max */
nco_poly_minmax_add(pl, grd_lon_typ, False);
nco_poly_re_org(pl, lcl_dp_x, lcl_dp_y);
/* use Charlie's formula */
nco_poly_area_add(pl);
//if(pl->dp_x_minmax[0] <0.0 || (pl->dp_x_minmax[1] - pl->dp_x_minmax[0]) > 30 )
if( !(pl->dp_x_minmax[1] - pl->dp_x_minmax[0] < 180.0
&& lon_ctr[idx] >= pl->dp_x_minmax[0] && lon_ctr[idx] <= pl->dp_x_minmax[1] ))
{
(void)fprintf(stdout, "/***%s: %s: invalid polygon to follow *******?", nco_prg_nm_get(), fnc_nm);
nco_poly_prn(pl, 0);
pl=nco_poly_free(pl);
continue;
}
//fprintf(stdout,"/***** input polygon pl lon center=%f convex=%s\n ********************/\n", lon_ctr[idx], (nco_poly_is_convex(pl) ? "True": "False") );
//nco_poly_prn(pl, 0);
/* check for wrapping -center outside min/max range */
bwrp=( lon_ctr[idx] < pl->dp_x_minmax[0] || lon_ctr[idx] > pl->dp_x_minmax[1] );
if( grd_lon_typ == nco_grd_lon_nil || grd_lon_typ == nco_grd_lon_unk )
{
if( !bwrp )
{
pl_lst[idx_cnt++]=pl;
}
else
{
(void)fprintf(stdout, "%s: polygon(%d) wrapped - but grd_lon_typ not specified \n", nco_prg_nm_get(), idx);
(void)fprintf(stdout, "/*******************************************/\n");
pl=nco_poly_free(pl);
}
continue;
}
/* if we are here then grd_lon_typ specifies a grid type */
if( !bwrp)
{
pl_lst[idx_cnt++]=pl;
}
/* cell width exceeds max so assume wrapping */
else if( nco_poly_wrp_splt(pl, grd_lon_typ, &pl_wrp_left, &pl_wrp_right ) == NCO_NOERR )
{
fprintf(stdout,"/***** pl, wrp_left, wrp_right ********************/\n");
if(pl_wrp_left)
{
nco_poly_re_org(pl_wrp_left, lcl_dp_x, lcl_dp_y);
pl_lst[idx_cnt++]=pl_wrp_left;
nco_poly_prn(pl_wrp_left, 2);
}
if(pl_wrp_right)
{
nco_poly_re_org(pl_wrp_right, lcl_dp_x, lcl_dp_y);
pl_lst[idx_cnt++]=pl_wrp_right;
nco_poly_prn(pl_wrp_right, 2);
}
pl=nco_poly_free(pl);
fprintf(stdout,"/**********************************/\n");
cnt_wrp_good++;
}
else
{
if(nco_dbg_lvl_get() >= nco_dbg_std ){
(void)fprintf(stdout, "%s: split wrapping didn't work on this polygon(%d)\n", nco_prg_nm_get(), idx );
(void)fprintf(stdout, "/********************************/\n");
}
pl=nco_poly_free(pl);
}
}
if(nco_dbg_lvl_get() >= nco_dbg_std )
(void)fprintf(stdout, "%s: %s size input list(%lu), size output list(%d), num of split polygons(%d)\n", nco_prg_nm_get(),fnc_nm, grd_sz, idx_cnt, cnt_wrp_good);
pl_lst=(poly_sct**)nco_realloc( pl_lst, (size_t)idx_cnt * sizeof (poly_sct*) );
*pl_nbr=idx_cnt;
return pl_lst;
}
poly_sct ** /* [O] [nbr] size of array */
nco_poly_lst_mk_rll(
double *area, /* I [sr] Area of source grid */
int *msk, /* I [flg] Mask on source grid */
double *lat_ctr, /* I [dgr] Latitude centers of source grid */
double *lon_ctr, /* I [dgr] Longitude centers of source grid */
double *lat_crn, /* I [dgr] Latitude corners of source grid */
double *lon_crn, /* I [dgr] Longitude corners of source grid */
size_t grd_sz, /* I [nbr] Number of elements in single layer of source grid */
long grd_crn_nbr, /* I [nbr] Maximum number of corners in source gridcell */
nco_grd_lon_typ_enm grd_lon_typ) /* I [num] */
{
int idx=0;
int wrp_cnt=0;
int wrp_y_cnt=0;
int msk_cnt=0;
const char fnc_nm[]="nco_poly_lst_mk_rll()";
nco_bool bwrp;
/* no polar caps on a RLL grid */
nco_bool bchk_caps=False;
double tot_area=0.0;
double *lat_ptr=lat_crn;
double *lon_ptr=lon_crn;
poly_sct *pl=(poly_sct*)NULL_CEWI;
/* pl_msk: plain placeholder struct with bmsk=False, duplicated for masked or invalid cells */
poly_sct *pl_msk=((poly_sct*)NULL_CEWI);
poly_sct **pl_lst;
/* list size is grd_sz - invalid or masked polygons are represented by a poly_sct->bmsk=False */
pl_lst=(poly_sct**)nco_malloc( sizeof (poly_sct*) * grd_sz);
pl_msk=nco_poly_init();
pl_msk->bmsk=False;
/* filter out wrapped lon cells */
if( grd_lon_typ == nco_grd_lon_nil || grd_lon_typ == nco_grd_lon_unk || grd_lon_typ == nco_grd_lon_bb )
bwrp=False;
else
bwrp=True;
// printf("About to print poly sct grd_sz=%d grd_crn_nbr=%d\n", grd_sz, grd_crn_nbr);
for(idx=0;idx<grd_sz; idx++)
{
/* check mask and area */
if( msk[idx]==0 || area[idx] == 0.0 ) {
pl_lst[idx]= nco_poly_dpl(pl_msk);
msk_cnt++;
continue;
}
pl=nco_poly_init_lst(poly_rll, grd_crn_nbr,0, idx, lon_ptr, lat_ptr);
lon_ptr+=(size_t)grd_crn_nbr;
lat_ptr+=(size_t)grd_crn_nbr;
/* if poly is less than a triangle then null is returned*/
if(!pl ) {
if(nco_dbg_lvl_get()>= nco_dbg_dev)
fprintf(stderr, "%s(): WARNING cell(id=%d) less than a triange\n", fnc_nm, idx);
pl_lst[idx]= nco_poly_dpl(pl_msk);
msk_cnt++;
continue;
}
/* add centroid from input */
pl->dp_x_ctr=lon_ctr[idx];
pl->dp_y_ctr=lat_ctr[idx];
/* pop shp */
nco_poly_shp_pop(pl);
/* add min max */
nco_poly_minmax_add(pl, grd_lon_typ, bchk_caps);
/* if coords cannot deal with wrapping */
if( pl->bwrp && bwrp==False )
{
pl=nco_poly_free(pl);
pl_lst[idx]= nco_poly_dpl(pl_msk);
msk_cnt++;
continue;
}
/* The area of an RLL cell needs to be re-calculated because its lines of latitude are small circles, not great circles */
nco_poly_area_add(pl);
/* area NOT set so fill it in here - NB: this will eventually be written to the map file in nco_map_mk */
if(area[idx]==-1.0)
area[idx]=pl->area;
/* simple center of a rll cell - should always be inside of polygon */
nco_poly_ctr_add(pl, grd_lon_typ);
if(nco_dbg_lvl_get()>= nco_dbg_dev )
if(pl->bwrp)
nco_poly_prn(pl,0);
/* for debugging */
tot_area+=pl->area;
/* for debugging total number of wrapped cells */
wrp_cnt+=pl->bwrp;
pl_lst[idx]=pl;
}
if(nco_dbg_lvl_get() >= nco_dbg_dev )
(void)fprintf(stderr, "%s: %s size input list(%lu), size output list(%lu) total area=%.15e num wrapped= %d num caps=%d num masked=%d\n", nco_prg_nm_get(),fnc_nm, grd_sz, grd_sz, tot_area, wrp_cnt, wrp_y_cnt, msk_cnt);
pl_msk=nco_poly_free(pl_msk);
return pl_lst;
}
poly_sct ** /* [O] [nbr] size of array */
nco_poly_lst_mk_sph(
double *area, /* I [sr] Area of source grid */
int *msk, /* I [flg] Mask on source grid */
double *lat_ctr, /* I [dgr] Latitude centers of source grid */
double *lon_ctr, /* I [dgr] Longitude centers of source grid */
double *lat_crn, /* I [dgr] Latitude corners of source grid */
double *lon_crn, /* I [dgr] Longitude corners of source grid */
size_t grd_sz, /* I [nbr] Number of elements in single layer of source grid */
long grd_crn_nbr, /* I [nbr] Maximum number of corners in source gridcell */
nco_grd_lon_typ_enm grd_lon_typ) /* I [num] */
{
int idx=0;
int wrp_cnt=0;
int wrp_y_cnt=0;
int msk_cnt=0;
const char fnc_nm[]="nco_poly_lst_mk_sph()";
nco_bool bwrp;
/* check to see if cell is a polar cap */
nco_bool bchk_caps=True;
double tot_area=0.0;
double *lat_ptr=lat_crn;
double *lon_ptr=lon_crn;
/* buffers used in nco_poly_re_org() */
double pControl[NBR_SPH];
poly_sct *pl=(poly_sct*)NULL_CEWI;
/* pl_msk: plain placeholder struct with bmsk=False, duplicated for masked or invalid cells */
poly_sct *pl_msk=((poly_sct*)NULL_CEWI);
poly_sct **pl_lst;
/* list size is grd_sz - invalid or masked polygons are represented by a poly_sct->bmsk=False */
pl_lst=(poly_sct**)nco_malloc( sizeof (poly_sct*) * grd_sz);
pl_msk=nco_poly_init();
pl_msk->bmsk=False;
/* filter out wrapped lon cells */
if( grd_lon_typ == nco_grd_lon_nil || grd_lon_typ == nco_grd_lon_unk || grd_lon_typ == nco_grd_lon_bb )
bwrp=False;
else
bwrp=True;
// printf("About to print poly sct grd_sz=%d grd_crn_nbr=%d\n", grd_sz, grd_crn_nbr);
for(idx=0;idx<grd_sz; idx++)
{
/* check mask and area */
if( msk[idx]==0 || area[idx] == 0.0 ) {
pl_lst[idx]= nco_poly_dpl(pl_msk);
msk_cnt++;
continue;
}
pl=nco_poly_init_lst(poly_sph, grd_crn_nbr,0, idx, lon_ptr, lat_ptr);
lon_ptr+=(size_t)grd_crn_nbr;
lat_ptr+=(size_t)grd_crn_nbr;
/* if poly is less than a triangle then null is returned*/
if(!pl ) {
if(nco_dbg_lvl_get()>= nco_dbg_dev)
fprintf(stderr, "%s(): WARNING cell(id=%d) less than a triange\n", fnc_nm, idx);
pl_lst[idx]= nco_poly_dpl(pl_msk);
msk_cnt++;
continue;
}
/* add centroid from input */
pl->dp_x_ctr=lon_ctr[idx];
pl->dp_y_ctr=lat_ctr[idx];
/* pop shp */
nco_poly_shp_pop(pl);
/* add min max */
nco_poly_minmax_add(pl, grd_lon_typ, bchk_caps);
/* if coords cannot deal with wrapping */
if( pl->bwrp && bwrp==False )
{
pl=nco_poly_free(pl);
pl_lst[idx]= nco_poly_dpl(pl_msk);
msk_cnt++;
continue;
}
/* The cell area needs to be re-calculated because lines of latitude are small circles, not great circles */
nco_poly_area_add(pl);
/* area NOT set so fill it in here - NB: this will eventually be written to the map file in nco_map_mk */
if(area[idx]==-1.0)
area[idx]=pl->area;
/* fxm:2019-06-07 - there is a problem using the polygon center as a control point because
* for some RLL grids the center of a polar triangle can be the pole */
/* The centroid can be outside of a convex (spherical) polygon.
* For nco_sph_intersect_pre() to work correctly it requires a point INSIDE the convex polygon,
* so we use a custom function.
* Just remember FROM HERE on that pl->dp_x_ctr, pl->dp_y_ctr is NOT the centroid
* */
if(nco_sph_inside_mk(pl,pControl ))
{
pl->dp_x_ctr=R2D(pControl[3]);
pl->dp_y_ctr=R2D(pControl[4]);
}
else
nco_poly_ctr_add(pl, grd_lon_typ);
if(nco_dbg_lvl_get()>= nco_dbg_dev )
if(pl->bwrp)
nco_poly_prn(pl,0);
/* for debugging */
tot_area+=pl->area;
/* for debugging total number of wrapped cells */
wrp_cnt+=pl->bwrp;
wrp_y_cnt+=pl->bwrp_y;
pl_lst[idx]=pl;
}
if(nco_dbg_lvl_get() >= nco_dbg_dev )
(void)fprintf(stderr, "%s: %s size input list(%lu), size output list(%lu) total area=%.15e num wrapped= %d num caps=%d num masked=%d\n", nco_prg_nm_get(),fnc_nm, grd_sz, grd_sz, tot_area, wrp_cnt, wrp_y_cnt, msk_cnt);
pl_msk=nco_poly_free(pl_msk);
return pl_lst;
}
poly_sct **
nco_poly_lst_free(
poly_sct **pl_lst,
int arr_nbr)
{
int idx;
for(idx=0; idx<arr_nbr; idx++)
pl_lst[idx]=nco_poly_free(pl_lst[idx]);
pl_lst=(poly_sct**)nco_free(pl_lst);
return pl_lst;
}
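/* Editor's note: the ptr=(poly_sct**)nco_free(ptr) pattern above relies on
nco_free() returning NULL after freeing, so the pointer self-nullifies and
cannot dangle. A minimal equivalent in plain C (xfree is a hypothetical
stand-in, not the NCO implementation): */
#if 0
#include <stdlib.h>
static void *xfree(void *vp){ if(vp) free(vp); return NULL; }
/* usage: buf=(double*)xfree(buf); leaves buf == NULL */
#endif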
void
nco_poly_set_priority(
int nbr_lst,
KDPriority *list){
int idx;
for(idx=0;idx<nbr_lst;idx++){
list[idx].dist = 1.1;
list[idx].elem = (KDElem*)NULL;
}
return ;
}
poly_sct **
nco_poly_lst_mk_vrl( /* create overlap mesh for crt polygons */
poly_sct **pl_lst_in,
int pl_cnt_in,
KDTree *rtree,
int *pl_cnt_vrl_ret){
/* just duplicate output list to overlap */
size_t idx;
size_t jdx;
int max_nbr_vrl=1000;
int pl_cnt_vrl=0;
//nco_bool bSort=True;
const char fnc_nm[]="nco_poly_mk_vrl()";
/* buffers used in nco_poly_re_org() */
double lcl_dp_x[VP_MAX]={0};
double lcl_dp_y[VP_MAX]={0};
kd_box size;
poly_sct ** pl_lst_vrl=NULL_CEWI;
KDPriority *list;
list = (KDPriority *)nco_calloc(sizeof(KDPriority),(size_t)max_nbr_vrl);
printf("INFO - entered function nco_poly_mk_vrl\n");
/* start main loop over input polygons */
for(idx=0 ; idx<pl_cnt_in ;idx++ )
{
int cnt_vrl=0;
int cnt_vrl_on=0;
(void)nco_poly_set_priority(max_nbr_vrl,list);
/* get bounds of polygon in */
size[KD_LEFT] = pl_lst_in[idx]->dp_x_minmax[0];
size[KD_RIGHT] = pl_lst_in[idx]->dp_x_minmax[1];
size[KD_BOTTOM] = pl_lst_in[idx]->dp_y_minmax[0];
size[KD_TOP] = pl_lst_in[idx]->dp_y_minmax[1];
/* find overlapping polygons */
// cnt_vrl=kd_nearest_intersect(rtree, size, max_nbr_vrl,list,bSort );
/* nco_poly_prn(2, pl_lst_in[idx] ); */
for(jdx=0; jdx <cnt_vrl ;jdx++){
poly_sct *pl_vrl=(poly_sct*)NULL_CEWI;
poly_sct *pl_out=(poly_sct*)list[jdx].elem->item;
// nco_poly_prn(2, pl_out);
/* check for polygon in polygon first */
if( nco_crt_poly_in_poly(pl_lst_in[idx], pl_out) == pl_out->crn_nbr )
{
//fprintf(stderr,"%s: using poly_in_poly()\n", fnc_nm);
pl_vrl=nco_poly_dpl(pl_out);
}
else
pl_vrl= nco_poly_vrl_do(pl_lst_in[idx], pl_out, 0, (char*)NULL);
if(pl_vrl){
nco_poly_re_org(pl_vrl, lcl_dp_x, lcl_dp_y);
/* add area */
nco_poly_area_add(pl_vrl);
/* shp not needed */
nco_poly_shp_free(pl_vrl);
pl_lst_vrl=(poly_sct**)nco_realloc(pl_lst_vrl, sizeof(poly_sct*) * (pl_cnt_vrl+1));
pl_lst_vrl[pl_cnt_vrl]=pl_vrl;
pl_cnt_vrl++;
cnt_vrl_on++;
if(nco_poly_is_convex(pl_vrl) == False )
{
fprintf(stderr,"%s: %s vrl polygon convex=0 vrl ,in convex=%d ,out convex=%d\n", nco_prg_nm_get(), fnc_nm, nco_poly_is_convex(pl_lst_in[idx]), nco_poly_is_convex(pl_out) );
nco_poly_prn(pl_vrl, 2);
nco_poly_prn(pl_lst_in[idx], 2);
nco_poly_prn(pl_out, 2);
}
//fprintf(stderr,"Overlap polygon to follow\n");
//nco_poly_prn(2, pl_vrl);
}
}
if( nco_dbg_lvl_get() >= nco_dbg_dev )
(void) fprintf(stderr, "%s: total overlaps=%d for polygon %lu - potential overlaps=%d actual overlaps=%d\n", nco_prg_nm_get(), pl_cnt_vrl, idx, cnt_vrl, cnt_vrl_on);
}
list = (KDPriority *)nco_free(list);
/* return size of list */
*pl_cnt_vrl_ret=pl_cnt_vrl;
return pl_lst_vrl;
}
poly_sct **
nco_poly_lst_mk_vrl_sph( /* create overlap mesh for sph polygons */
poly_sct **pl_lst_in,
int pl_cnt_in,
nco_grd_lon_typ_enm grd_lon_typ,
KDTree **tree,
int nbr_tr,
int *pl_cnt_vrl_ret){
/* just duplicate output list to overlap */
const char fnc_nm[]="nco_poly_lst_mk_vrl_sph()";
nco_bool bDirtyRats=False;
nco_bool bSort=True;
int max_nbr_vrl=(NCO_VRL_BLOCKSIZE);
int pl_cnt_vrl=0;
int thr_idx=0;
int pl_cnt_dbg=0;
int tot_nan_cnt=0;
int tot_wrp_cnt=0;
/* approx number of input cells each thread will process */
int thr_quota;
/* reporting step */
int thr_quota_step;
poly_typ_enm pl_typ;
size_t idx;
int lcl_thr_nbr;
omp_mem_sct *mem_lst=NULL_CEWI;
double tot_area=0.0;
poly_sct ** pl_lst_vrl=NULL_CEWI;
poly_sct **pl_lst_dbg=NULL_CEWI;
FILE * const fp_stderr=stderr;
pl_typ=pl_lst_in[0]->pl_typ;
lcl_thr_nbr=omp_get_max_threads();
mem_lst=(omp_mem_sct*)nco_malloc(sizeof(omp_mem_sct)*lcl_thr_nbr);
for(idx=0;idx<lcl_thr_nbr;idx++)
{
mem_lst[idx].pl_lst=NULL_CEWI;
mem_lst[idx].blk_nbr=0;
mem_lst[idx].pl_cnt=0;
mem_lst[idx].kd_list=(KDPriority *)nco_calloc(sizeof(KDPriority),(size_t)(NCO_VRL_BLOCKSIZE));
mem_lst[idx].kd_cnt=0;
mem_lst[idx].kd_blk_nbr=1;
mem_lst[idx].idx_cnt=0;
}
thr_quota=pl_cnt_in/lcl_thr_nbr;
thr_quota_step=thr_quota/20;
if( thr_quota_step <2000 )
thr_quota_step=2000;
/* NB: "OpenMP notes" section of nco_rgr.c has detailed discussion of these settings
Henry, please keep the variables in alphabetical order within a clause and remember to update Intel */
#ifdef __GNUG__
# define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ )
# if GCC_LIB_VERSION < 490
# define GXX_OLD_OPENMP_SHARED_TREATMENT 1
# endif /* 480 */
#endif /* !__GNUC__ */
#if defined(__INTEL_COMPILER)
# pragma omp parallel for default(none) private(idx,thr_idx) shared(bDirtyRats,bSort,fnc_nm,grd_lon_typ,max_nbr_vrl,mem_lst,nbr_tr,pl_cnt_in,pl_lst_in,pl_cnt_dbg,pl_lst_dbg,pl_typ,tree,stderr,thr_quota,thr_quota_step,tot_area,tot_nan_cnt,tot_wrp_cnt)
#else /* !__INTEL_COMPILER */
# ifdef GXX_OLD_OPENMP_SHARED_TREATMENT
# pragma omp parallel for default(none) private(idx,thr_idx) shared(bDirtyRats,bSort,grd_lon_typ,max_nbr_vrl,nbr_tr,pl_cnt_dbg,pl_typ,tree,tot_nan_cnt,tot_wrp_cnt)
# else /* !old g++ */
# pragma omp parallel for private(idx,thr_idx) schedule(dynamic,40) shared(bDirtyRats,bSort,grd_lon_typ,max_nbr_vrl,nbr_tr,pl_cnt_dbg,pl_typ,tree,tot_nan_cnt,tot_wrp_cnt)
# endif /* !old g++ */
#endif /* !__INTEL_COMPILER */
for(idx=0 ; idx<pl_cnt_in ;idx++ ) {
nco_bool bSplit=False;
int vrl_cnt = 0;
int vrl_cnt_on = 0;
int nan_cnt=0;
int wrp_cnt=0;
size_t jdx;
double vrl_area = 0.0;
kd_box size1;
kd_box size2;
// (void) nco_poly_set_priority(max_nbr_vrl, list);
thr_idx=omp_get_thread_num();
if (0 && nco_dbg_lvl_get() >= nco_dbg_dev)
fprintf(fp_stderr, "%s(): idx=%lu thr=%d\n",fnc_nm, idx, thr_idx);
if(pl_lst_in[idx]->bmsk==False)
continue;
mem_lst[thr_idx].kd_cnt=0;
if(mem_lst[thr_idx].kd_blk_nbr >1)
{
mem_lst[thr_idx].kd_blk_nbr=1;
mem_lst[thr_idx].kd_list=(KDPriority*)nco_free(mem_lst[thr_idx].kd_list);
//mem_lst[thr_idx].kd_list=(KDPriority*)nco_realloc(mem_lst[idx].kd_list, sizeof(KDPriority) * NCO_VRL_BLOCKSIZE );
mem_lst[thr_idx].kd_list=(KDPriority *)nco_calloc(sizeof(KDPriority),(size_t)(NCO_VRL_BLOCKSIZE));
}
/* get bounds of polygon in */
bSplit=nco_poly_minmax_split(pl_lst_in[idx],grd_lon_typ, size1,size2 );
/* if a wrapped polygon then do two searches */
if(bSplit)
vrl_cnt = kd_nearest_intersect_wrp(tree, nbr_tr, size1, size2, &mem_lst[thr_idx]);
else
vrl_cnt = kd_nearest_intersect(tree, nbr_tr, size1, &mem_lst[thr_idx], bSort);
/* nco_poly_prn(2, pl_lst_in[idx] ); */
for (jdx = 0; jdx < vrl_cnt; jdx++) {
poly_sct *pl_vrl = NULL_CEWI;
poly_sct *pl_out = (poly_sct *) mem_lst[thr_idx].kd_list[jdx].elem->item;
/* for area debug only */
mem_lst[thr_idx].kd_list[jdx].area=-1.0;
mem_lst[thr_idx].kd_list[jdx].dbg_sng[0]='\0';
/*
if (pl_lst_in[idx]->pl_typ != pl_out->pl_typ) {
fprintf(stderr, "%s: %s poly type mismatch\n", nco_prg_nm_get(), fnc_nm);
continue;
}
*/
if(pl_typ== poly_rll)
{
pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, 0, (char*)NULL);
/* if pl_vrl is NULL from nco_poly_vrl_do() then there are 3 possible scenarios:
*
* 1) pl_lst_in[idx] and pl_out are separate and distinct
*
* 2) pl_lst_in[idx] is entirely inside pl_out.
*    To check for this it is sufficient to check the
*    minmax bounds and then also check that a single vertex
*    from pl_lst_in[idx] is inside pl_out
*
* 3) pl_out is entirely inside pl_lst_in[idx].
*    Check minmax bounds then check a single vertex from
*    pl_out
*/
if(!pl_vrl)
{
if (nco_poly_in_poly_minmax(pl_lst_in[idx], pl_out))
pl_vrl = nco_poly_dpl(pl_out);
else if (nco_poly_in_poly_minmax(pl_out, pl_lst_in[idx]))
pl_vrl = nco_poly_dpl(pl_lst_in[idx]);
}
if(pl_vrl)
{
/* add area nb also sets wrapping */
nco_poly_minmax_add(pl_vrl, grd_lon_typ, False);
/* REMEMBER poly_rll area uses minmax limits AND NOT VERTEX's */
nco_poly_area_add(pl_vrl);
}
}
if(pl_typ== poly_sph ) {
int lret = 0;
int lret2=0;
/* [flg] set by nco_sph_intersect_pre - if True it means that the scan has detected a genuine intersection,
* so there is no need to do further processing */
nco_bool bGenuine=False;
nco_bool bBadArea=False;
char in_sng[VP_MAX];
char out_sng[VP_MAX];
int flg_snp_to=2;
in_sng[0]='\0';
out_sng[0]='\0';
nco_sph_intersect_pre(pl_lst_in[idx], pl_out, in_sng);
if(0 && nco_dbg_lvl_get() >= nco_dbg_dev)
(void)fprintf(stderr,"%s:%s(): sp_sng=%s \n",nco_prg_nm_get(),fnc_nm, in_sng );
lret = nco_sph_process_pre(pl_out, in_sng, &bGenuine);
switch(lret)
{
case 1:
pl_vrl = nco_poly_dpl(pl_out);
break;
case 2:
pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, (char *)NULL);
break;
case 3:
pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, in_sng);
break;
case 4:
pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, (char*)NULL);
break;
case 5:
pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, in_sng);
break;
}
if(pl_vrl)
{
double min_area= ( pl_out->area < pl_lst_in[idx]->area ? pl_out->area : pl_lst_in[idx]->area );
/* add area nb also sets wrapping */
nco_poly_minmax_add(pl_vrl, grd_lon_typ, False);
/* REMEMBER poly_rll area uses minmax limits AND NOT VERTEX's */
nco_poly_area_add(pl_vrl);
if( isnan( pl_vrl->area) || pl_vrl->area == 0.0 || (pl_vrl->area - min_area)/ min_area > 1.0e-04 ) {
pl_vrl = nco_poly_free(pl_vrl);
bBadArea=True;
}
}
/* swap args around and try again */
if(!pl_vrl && ( (bGenuine==False && lret !=1 ) || bBadArea ) )
//if(!pl_vrl && ( (bGenuine==False && lret !=1 ) || bBadArea ) )
{
flg_snp_to=1;
nco_sph_intersect_pre(pl_out, pl_lst_in[idx], out_sng);
lret2 = nco_sph_process_pre(pl_lst_in[idx], out_sng, &bGenuine);
switch(lret2)
{
case 1:
pl_vrl = nco_poly_dpl(pl_lst_in[idx]);
break;
case 2:
pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, (char *)NULL);
break;
case 3:
pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, out_sng);
break;
case 4:
pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, (char*)NULL);
break;
case 5:
pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, out_sng);
break;
}
if(pl_vrl)
{
nco_poly_minmax_add(pl_vrl, grd_lon_typ, False);
nco_poly_area_add(pl_vrl);
}
}
if(bDirtyRats )
sprintf(mem_lst[thr_idx].kd_list[jdx].dbg_sng, "lret=%d in_sng=%s lret2=%d out_sng=%s\n",lret, in_sng, lret2, out_sng);
if(bDirtyRats && pl_vrl && !nco_sph_is_convex(pl_vrl->shp, pl_vrl->crn_nbr) )
{
(void) fprintf(stderr, "/************* concave overlap plygon***********/\n");
nco_poly_prn(pl_lst_in[idx],0);
nco_poly_prn(pl_out,0);
nco_poly_prn(pl_vrl, 0);
(void) fprintf(stderr, "/***********************************************/\n");
}
} /* end if poly_sph */
if (pl_vrl) {
// nco_poly_re_org(pl_vrl, lcl_dp_x, lcl_dp_y);
/* add appropriate IDs */
pl_vrl->src_id = pl_lst_in[idx]->src_id;
pl_vrl->dst_id = pl_out->src_id;
/* shp not needed */
nco_poly_shp_free(pl_vrl);
/* calculate weight -simple ratio of areas */
pl_vrl->wgt=pl_vrl->area / pl_out->area;
/* add lat/lon centers */
nco_poly_ctr_add(pl_vrl, grd_lon_typ);
wrp_cnt+=pl_vrl->bwrp;
if( isnan(pl_vrl->area) || pl_vrl->area ==0.0 )
{ nan_cnt++;
pl_vrl->area=0.0;
pl_vrl=nco_poly_free(pl_vrl);
continue;
}
/* for input polygon wgt is used to calculate frac_a */
pl_lst_in[idx]->wgt+= ( pl_vrl->area / pl_lst_in[idx]->area );
/*
#ifdef _OPENMP
#pragma omp critical
#endif
{
pl_out->wgt+=pl_vrl->wgt;
}
*/
vrl_area += pl_vrl->area;
if( mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE < mem_lst[thr_idx].pl_cnt +1 )
mem_lst[thr_idx].pl_lst= (poly_sct**)nco_realloc(mem_lst[thr_idx].pl_lst, sizeof(poly_sct*) * ++mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE );
mem_lst[thr_idx].pl_lst[ mem_lst[thr_idx].pl_cnt ]=pl_vrl;
mem_lst[thr_idx].pl_cnt++;
vrl_cnt_on++;
/* for area debug only */
mem_lst[thr_idx].kd_list[jdx].area=pl_vrl->area;
}
} /* end jdx */
if (nco_dbg_lvl_get() >= nco_dbg_dev) {
#ifdef _OPENMP
#pragma omp critical
#endif
{
tot_nan_cnt += nan_cnt;
tot_wrp_cnt += wrp_cnt;
tot_area += vrl_area;
} /* end OMP critical */
/* report when overlap area differs from cell area by more than eps */
int kdx;
double eps = 1e-8;
double frc = vrl_area / pl_lst_in[idx]->area;
if (frc < (1 - eps) || frc > 1 + eps) {
(void) fprintf(fp_stderr,
"%s: polygon %lu - potential overlaps=%d actual overlaps=%d area_in=%.10e vrl_area=%.10e adiff=%.15e bSplit=%d\n",
nco_prg_nm_get(), idx, vrl_cnt, vrl_cnt_on, pl_lst_in[idx]->area, vrl_area,
(pl_lst_in[idx]->area - vrl_area), bSplit);
if (bDirtyRats) {
//if (pl_lst_in[idx]->bwrp ) {
if (1) {
(void) fprintf(fp_stderr, "# /** following pl_lst_in[%lu] **/\n", idx);
nco_poly_prn(pl_lst_in[idx], 0);
(void) fprintf(fp_stderr, "# /** overlaps to follow **/\n");
for (kdx = 0; kdx < vrl_cnt; kdx++) {
nco_poly_prn((poly_sct *) mem_lst[thr_idx].kd_list[kdx].elem->item, 0);
(void)fprintf(fp_stderr, "# vrl_area=%.15e\n",mem_lst[thr_idx].kd_list[kdx].area );
(void)fprintf(fp_stderr, "# dbg_sng=%s\n",mem_lst[thr_idx].kd_list[kdx].dbg_sng );
}
(void) fprintf(stderr, "/************* end dirty rats ***************/\n");
}
if(1 && vrl_cnt_on>0) {
pl_lst_dbg = (poly_sct **) nco_realloc(pl_lst_dbg, sizeof(poly_sct *) * (pl_cnt_dbg + vrl_cnt_on));
/* write overlaps maybe */
for (kdx = 0; kdx < vrl_cnt_on; kdx++) {
poly_sct * lcl_poly=mem_lst[thr_idx].pl_lst[mem_lst[thr_idx].pl_cnt - vrl_cnt_on + kdx];
if(lcl_poly->src_id== pl_lst_in[idx]->src_id )
pl_lst_dbg[pl_cnt_dbg++] = nco_poly_dpl(lcl_poly);
}
}
}
}
} /* end dbg */
/* output some useful tracking info - not debug but informative */
if ( ++mem_lst[thr_idx].idx_cnt % thr_quota_step == 0 && nco_dbg_lvl_get() >=3 )
(void)fprintf(fp_stderr, "%s: thread %d has processed %2.2f%% (%ld) of src cells quota and output %ld overlap cells\n", nco_prg_nm_get(), thr_idx, (float)mem_lst[thr_idx].idx_cnt/(float)thr_quota *100.0, mem_lst[thr_idx].idx_cnt, mem_lst[thr_idx].pl_cnt );
} /* end for idx */
/* turn tot_area into a % of 4*PI */
/* tot_area = tot_area / 4.0 / M_PI *100.0; */
/* final report */
if (nco_dbg_lvl_get() >= nco_dbg_dev)
(void) fprintf(stderr, "%s: total overlaps=%d, total_area=%.15f (area=%3.10f%%) total num wrapped= %d total nan nbr=%d \n", nco_prg_nm_get(), pl_cnt_vrl, tot_area, tot_area /4.0 / M_PI *100.0, tot_wrp_cnt, tot_nan_cnt);
/* write filtered polygons to file */
if(bDirtyRats && pl_cnt_dbg)
{
nco_msh_poly_lst_wrt("tst-wrt-dbg.nc", pl_lst_dbg, pl_cnt_dbg, grd_lon_typ,NC_FORMAT_NETCDF4);
pl_lst_dbg=(poly_sct**)nco_poly_lst_free(pl_lst_dbg, pl_cnt_dbg);
}
/* concatenate memory lists */
{
size_t tot_pl_cnt = 0;
poly_sct **tmp_pl_lst = NULL_CEWI;
/* find total size of lists */
for (idx = 0; idx < lcl_thr_nbr; idx++)
tot_pl_cnt += mem_lst[idx].pl_cnt;
pl_lst_vrl = mem_lst[0].pl_lst;
pl_lst_vrl = (poly_sct **) nco_realloc(pl_lst_vrl, sizeof(poly_sct *) * tot_pl_cnt);
tmp_pl_lst = pl_lst_vrl;
tmp_pl_lst += mem_lst[0].pl_cnt;
for (idx = 1; idx < lcl_thr_nbr; idx++) {
if (mem_lst[idx].pl_lst) {
memcpy(tmp_pl_lst, mem_lst[idx].pl_lst, sizeof(poly_sct *) * mem_lst[idx].pl_cnt);
tmp_pl_lst += mem_lst[idx].pl_cnt;
/* free up list */
mem_lst[idx].pl_lst = (poly_sct **) nco_free(mem_lst[idx].pl_lst);
}
}
*pl_cnt_vrl_ret=tot_pl_cnt;
}
/* free up kd_list's */
for(idx=0;idx<lcl_thr_nbr;idx++)
mem_lst[idx].kd_list= (KDPriority*) nco_free(mem_lst[idx].kd_list);
mem_lst=(omp_mem_sct*)nco_free(mem_lst);
return pl_lst_vrl;
}
/* check areas - nb WARNING modifies area in pl_lst_in and pl_lst_out */
void nco_poly_lst_chk(
poly_sct **pl_lst_in,
int pl_cnt_in,
poly_sct **pl_lst_out,
int pl_cnt_out,
poly_sct **pl_lst_vrl,
int pl_cnt_vrl)
{
int id;
int idx;
int jdx;
double epsilon=1.0e-8;
const char fnc_nm[]="nco_poly_lst_chk()";
for(idx=0;idx<pl_cnt_vrl;idx++)
{
id=pl_lst_vrl[idx]->src_id;
for(jdx=0;jdx<pl_cnt_in;jdx++)
if(pl_lst_in[jdx]->src_id==id)
break;
if(jdx < pl_cnt_in )
pl_lst_in[jdx]->area-=pl_lst_vrl[idx]->area;
}
fprintf(stderr, "%s():WARNING following is list of incomplete src cells, by src_id no\n",fnc_nm);
for(idx=0;idx<pl_cnt_in;idx++)
if( fabs( pl_lst_in[idx]->area) > epsilon)
fprintf(stderr, "src_id=%d area=%.10f\n", pl_lst_in[idx]->src_id, pl_lst_in[idx]->area );
for(idx=0;idx<pl_cnt_vrl;idx++)
{
id=pl_lst_vrl[idx]->dst_id;
for(jdx=0;jdx<pl_cnt_out;jdx++)
if(pl_lst_out[jdx]->src_id==id)
break;
if(jdx < pl_cnt_out )
pl_lst_out[jdx]->area-=pl_lst_vrl[idx]->area;
}
fprintf(stderr, "%s():WARNING following is list of incomplete dst cells, by src_id no\n",fnc_nm);
for(idx=0;idx<pl_cnt_out;idx++)
if( fabs( pl_lst_out[idx]->area) > epsilon)
fprintf(stderr, "src_id=%d area=%.10f\n", pl_lst_out[idx]->src_id, pl_lst_out[idx]->area );
return;
}
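/* Illustrative example (hypothetical values): if a src cell of area 1.0 is
 * matched by exactly two overlap polygons with areas 0.6 and 0.4, the loop
 * above drives its residual area to 0.0 and it is not reported; any residual
 * with |area| > epsilon flags a cell whose overlaps do not tile it
 * completely. */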
poly_sct **
nco_poly_lst_chk_dbg(
poly_sct **pl_lst,
int pl_cnt,
poly_sct **pl_lst_vrl,
int pl_cnt_vrl,
int io_flg, /* [flg] 0 - use src_id from vrl, 1 - use dst_id from vrl */
int *pl_cnt_dbg) /* size of output dbg grid */
{
int id;
int idx;
int jdx;
int pl_nbr_dbg=0;
double epsilon=1.0e-12;
double *area=NULL_CEWI;
nco_bool is_lst_cnt=False;
/* if true then pl_cnt matches max src_id; there are no missing records from the netCDF SCRIP input */
is_lst_cnt=( pl_cnt== pl_lst[pl_cnt-1]->src_id +1);
poly_sct **pl_lst_dbg=NULL_CEWI;
const char fnc_nm[]="nco_poly_lst_chk_dbg()";
area=(double*)nco_malloc(sizeof(double)*pl_cnt);
for(idx=0;idx<pl_cnt;idx++)
area[idx]=pl_lst[idx]->area;
for(idx=0;idx<pl_cnt_vrl;idx++)
{
id = (io_flg ? pl_lst_vrl[idx]->dst_id : pl_lst_vrl[idx]->src_id);
if(is_lst_cnt )
area[id] -= pl_lst_vrl[idx]->area;
else
{
for (jdx = 0; jdx < pl_cnt; jdx++)
if (pl_lst[jdx]->src_id == id)
break;
if (jdx < pl_cnt)
area[jdx] -= pl_lst_vrl[idx]->area;
}
}
for(idx=0;idx<pl_cnt;idx++) {
if (fabs(area[idx]) > epsilon) {
if (nco_dbg_lvl_get() >= nco_dbg_dev)
fprintf(stderr, "%s() src_id=%d area=%.15e\n", fnc_nm, pl_lst[idx]->src_id, area[idx]);
pl_lst_dbg = (poly_sct **) nco_realloc(pl_lst_dbg, sizeof(poly_sct*) * (pl_nbr_dbg + 1));
pl_lst_dbg[pl_nbr_dbg] = nco_poly_dpl(pl_lst[idx]);
pl_nbr_dbg++;
}
}
area=(double*)nco_free(area);
*pl_cnt_dbg=pl_nbr_dbg;
return pl_lst_dbg;
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape;
bool keep_highest;
mxnet::Tuple<int> shape;
bool reverse;
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape)
.set_default(mxnet::Tuple<int>())
.describe("The target shape");
DMLC_DECLARE_FIELD(reverse)
.set_default(false)
.describe("If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe("(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
bool operator==(const ReshapeParam &other) const {
return this->target_shape == other.target_shape &&
this->keep_highest == other.keep_highest &&
this->shape == other.shape &&
this->reverse == other.reverse;
}
};
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape, bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;
size_t src_idx = 0;
int inf_idx = -1;
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len-1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this
if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
if (inf_idx >= 0) {
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp) new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
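/* Illustrative examples (hypothetical shapes) of the special values handled
 * above, assuming dshape = (2,3,4):
 *   shape = (-1)         -> (24)      : -1 infers the single unknown dim
 *   shape = (0,-1)       -> (2,12)    : 0 copies dim 0, -1 infers 24/2
 *   shape = (-2)         -> (2,3,4)   : -2 copies all remaining dims
 *   shape = (-3,4)       -> (6,4)     : -3 merges dims 0 and 1 (2*3)
 *   shape = (-4,1,2,3,4) -> (1,2,3,4) : -4 splits dim 0 (2) into 1 x 2 */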
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
if (shape_is_known(*in) && shape_is_known(out)) {
return true;
} else if (!shape_is_known(out)) {
return false;
} else {
int zero_axis = -1;
int known_dim_size_prod = 1;
for (int i = 0; i < in->ndim(); i++) {
if (!mxnet::dim_size_is_known(*in, i)) {
if (zero_axis != -1)
return false; // more than 1 zero found.
else
zero_axis = i;
} else {
known_dim_size_prod *= (*in)[i];
}
}
(*in)[zero_axis] = out.Size() / known_dim_size_prod;
return true;
}
}
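/* Illustrative example (hypothetical shapes): with *in = (2,-1,4) and
 * out = (2,3,4), the product of the known input dims is 8, so the single
 * unknown axis is filled with out.Size()/8 = 24/8 = 3 and the function
 * returns true. */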
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
return shape_is_known((*out_attrs)[0])
&& ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape &dshape = (*in_attrs)[0];
if (!shape_is_known(dshape)) return false;
size_t target_dim = 1;
for (int i = 1; i < dshape.ndim(); ++i) {
target_dim *= dshape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
return true;
}
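/* Illustrative example (hypothetical shape): Flatten keeps dim 0 and collapses
 * the rest, so an input of shape (2,3,4) yields the output shape (2,12). */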
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
bool operator==(const TransposeParam &other) const {
return this->axes == other.axes;
}
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
* \tparam DType Data type
* \tparam is_addto if true, accumulate into out instead of overwriting it
*/
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
// Ensure cache-line hits and avoid cache misses for any configuration
// L1 cache size to be utilized = 32 KB = 2^15 bytes
// Largest size of a single element of any dtype <= 8 bytes = 2^3
// Number of elements that fit - 2^15/2^3 = 2^12
// Block size - 2^6 x 2^6 (64 x 64)
// But we could leverage unrolling of for loops (for parallelization)
// Block size - 2^5 x 2^5 (32 x 32) with a potential 4x loop unroll
// blocksize * blocksize * num_threads = cache_size / dtype_size
// Instead of explicit unroll, let the compiler figure out the optimal unroll factor
const index_t blocksize = 32;
// collapse 2 parallelizes 2 for loops
// inner 2 for loops aren't parallelized to prevent cache miss
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (index_t i = 0; i < row; i += blocksize) {
for (index_t j = 0; j < col; j += blocksize) {
// transpose the block
for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
if (!is_addto) {
out[a * row + b] = in[b * col + a];
} else {
out[a * row + b] += in[b * col + a];
}
}
}
}
}
}
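/* Usage sketch (hypothetical buffers): for a row-major 2x3 float input,
 * Transpose2D<float, false>(in, out, 2, 3) writes the 3x2 transpose,
 * i.e. out[a*2 + b] = in[b*3 + a] for a in [0,3) and b in [0,2). */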
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i) return false;
}
return true;
}
template<typename xpu, bool is_addto = false>
bool TransposeCommonImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
// return true when running successfully, otherwise false
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U) return true;
Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
});
return true;
}
#endif
// Special handle the identity case
if (IsIdentityTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
if (!is_addto) {
// Use memcpy to accelerate the speed
Copy(out, in, s);
} else {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
s, ret.Size(), out.dptr_, in.dptr_);
}
});
return true;
}
// Handle the general transpose case
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 2: {
Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
"in GPU has been covered by transpose_pseudo2D."
" Report an issue in Github.";
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<3>());
} else {
out += transpose(in, axes.get<3>());
}
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<4>());
} else {
out += transpose(in, axes.get<4>());
}
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<5>());
} else {
out += transpose(in, axes.get<5>());
}
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<6>());
} else {
out += transpose(in, axes.get<6>());
}
break;
}
default:
// return false when dimensions > 6
return false;
break;
}
});
return true;
}
template<typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
CHECK_LE(axes.ndim(), 6) << "TransposeImpl supports at most 6 dimensions";
CHECK((TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes))) <<
"Failed to execute TransposeImpl Operator";
}
template <bool is_addto>
struct TransposeExKernel {
/*!
* \brief map the input element at linear index tid to its transposed output offset
* \param tid global thread id
* \param out_data output data
* \param in_data input data
* \param strides input strides and output strides
* \param ndim the number of dimension
*/
template <typename DType>
MSHADOW_XINLINE static void Map(int tid,
DType *out_data,
const DType *in_data,
const dim_t *strides,
const int ndim
) {
// tid is the index of input data
const dim_t* const out_strides = strides + ndim;
int k = tid;
int out_id = 0;
for (int i = 0; i < ndim; ++i) {
out_id += (k / strides[i]) * out_strides[i];
k %= strides[i];
}
if (is_addto)
out_data[out_id] += in_data[tid];
else
out_data[out_id] = in_data[tid];
}
};
template<typename xpu, bool is_addto = false>
void TransposeExImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes,
mshadow::Tensor<xpu, 1, dim_t>& strides_xpu
) {
/*
* If ndim <= 6, it is not necessary to allocate any space for `strides_xpu`
* If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements
*/
using namespace mshadow;
using namespace mshadow::expr;
if (TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes)) return;
CHECK_GT(axes.ndim(), 6) <<
"Failed to execute TransposeExImpl when axes.ndim() <= 6";
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
CHECK_EQ(strides_xpu.MSize(), axes.ndim() * 2) << \
"If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements";
const mxnet::TShape &in_shape = src.shape_;
// strides: in_strides and out_strides
const int ndim = axes.ndim();
std::vector<dim_t> strides(ndim * 2);
// compute in_strides
strides[ndim - 1] = 1;
for (int i = ndim - 2; i >= 0; --i) {
strides[i] = strides[i + 1] * in_shape[i + 1];
}
// compute out_strides
std::vector<dim_t> tmp_strides(ndim);
tmp_strides[ndim - 1] = 1;
for (int i = ndim - 2; i >= 0; --i) {
tmp_strides[i] = tmp_strides[i + 1] * in_shape[axes[i + 1]];
}
// reorder tmp_strides to out_strides
dim_t * const out_strides = &strides[ndim];
for (int i = 0; i < ndim; ++i) {
out_strides[axes[i]] = tmp_strides[i];
}
Shape<1> strides_shape;
strides_shape[0] = ndim * 2;
Tensor<cpu, 1, dim_t> strides_cpu(strides.data(), strides_shape);
// copy arguments into xpu context
Copy(strides_xpu, strides_cpu, s);
const DType *in = src.dptr<DType>();
DType *out = ret.dptr<DType>();
if (is_addto) {
mxnet_op::Kernel<TransposeExKernel<true>, xpu>::Launch(s,
in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
} else {
mxnet_op::Kernel<TransposeExKernel<false>, xpu>::Launch(s,
in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
}
});
}
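/* Illustrative trace of the stride bookkeeping above (the kernel path itself
 * only runs for ndim > 6; a hypothetical 2-D case is shown purely for the
 * arithmetic): in_shape = (2,3) with axes = (1,0) gives in_strides = (3,1),
 * tmp_strides = (2,1), and after the reorder out_strides = (1,2); input
 * element tid = 1 (row 0, col 1) then maps to
 * out_id = (1/3)*1 + (1%3)*2 = 2, i.e. row 1, col 0 of the 3x2 output. */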
template<typename xpu>
mshadow::Tensor<xpu, 1, dim_t> GetTransposeExWorkspace(
const OpContext& ctx,
const mxnet::TShape& axes
) {
if (axes.ndim() > 6) {
// allocate workspace when axes.ndim() > 6
mshadow::Shape<1> strides_shape;
strides_shape[0] = axes.ndim() * 2;
return ctx.requested[0].get_space_typed<xpu, 1, dim_t>(
strides_shape, ctx.get_stream<xpu>());
}
return {};
}
// matrix transpose
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
if (req[0] == kNullOp) {
return;
}
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK(req[0] == kWriteTo || req[0] == kAddTo)
<< "Transpose only supports kNullOp, kWriteTo and kAddTo";
mxnet::TShape axes;
if (param.axes.ndim() == 0) {
axes = mxnet::TShape(inputs[0].ndim(), -1);
for (int i = 0; i < axes.ndim(); ++i) {
axes[i] = axes.ndim() - 1 - i;
}
} else {
axes = common::CanonicalizeAxes(param.axes);
}
mshadow::Tensor<xpu, 1, dim_t> workspace =
GetTransposeExWorkspace<xpu>(ctx, axes);
if (req[0] == kAddTo) {
TransposeExImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0],
axes, workspace);
} else {
TransposeExImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0],
axes, workspace);
}
}
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp))
return false; // none of the shapes is known
if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
if (param.axes.ndim() == 0) {
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim()-1-i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[shp.ndim()-1-i] = out_shp[i];
}
} else {
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
int axis;
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
bool operator==(const ExpandDimParam &other) const {
return this->axis == other.axis;
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) {
return false;
}
int indim = ishape.ndim();
bool unknown_ishape = false;
if (-1 == indim) {
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
axis += indim + 1;
}
CHECK(axis >= 0 && axis <= indim)
<< "axis must be in the range [" << -indim << ", " << indim << "] ("
<< param.axis << " provided)";
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis+1; i < indim+1; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i-1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
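/* Illustrative example (hypothetical shape): for ishape = (2,3), axis = 0
 * yields (1,2,3), axis = 1 yields (2,1,3), and axis = -1 is normalized to 2
 * and yields (2,3,1). */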
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
if (param.step.ndim() == 0U) return true;
for (int i = 0; i < param.step.ndim(); ++i) {
if (param.step[i].has_value() && param.step[i].value() != 1)
return false;
}
return true;
}
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
const auto& in_stype = in_attrs->at(0);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const auto dispatch_ex = DispatchMode::kFComputeEx;
// If step = 1, no need to fallback; otherwise fallback to dense
bool trivial_step = false;
if (param.step.ndim() == 0U) {
trivial_step = true;
} else if (param.step.ndim() == 1U
&& (!param.step[0].has_value() || param.step[0].value() == 1)) {
trivial_step = true;
}
if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
if (dev_mask == Context::kCPU && MKLDNNEnvSet()
&& SupportMKLDNNSlice(param)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, dispatch_ex);
}
#endif
if (!dispatched) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
}
if (!dispatched && in_stype == kCSRStorage && trivial_step) {
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
// slice the indptr of a csr
struct SliceCsrIndPtr {
template<typename IType>
MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
}
};
/*
* a wrapper to launch SliceCsrIndPtr kernel.
* slice [src[begin] .. src[end]) and store in dst[0, end - begin)
*/
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
const IType* src, IType* dst) {
using namespace mshadow;
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
int indptr_len = end - begin + 1;
Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
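/* Illustrative example (hypothetical values): slicing rows [1,2) of a CSR
 * matrix with indptr = {0,2,5,7} copies indptr_len = 2 entries and rebases
 * them against in[begin], producing dst = {0,3}. */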
/*
* Slice a CSR NDArray for first dimension
*/
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out) {
using namespace mshadow;
using namespace mxnet_op;
using namespace csr;
nnvm::dim_t begin_row = begin[0];
nnvm::dim_t end_row = end[0];
nnvm::dim_t indptr_len = end_row - begin_row + 1;
out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
// assume idx indptr share the same type
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
Stream<xpu> *s = ctx.get_stream<xpu>();
RType nnz = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
// return csr zeros if nnz = 0
if (nnz == 0) {
out.set_aux_shape(kIdx, Shape1(0));
return;
}
// copy indices and values
out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
out.CheckAndAllocData(Shape1(nnz));
IType* in_idx = in.aux_data(kIdx).dptr<IType>();
IType* out_idx = out.aux_data(kIdx).dptr<IType>();
DType* in_data = in.data().dptr<DType>();
DType* out_data = out.data().dptr<DType>();
RType offset = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
});
});
});
}
/*!
* \brief slice a CSRNDArray for two dimensions
*/
struct SliceDimTwoCsrAssign {
/*!
* \brief This function slices a CSRNDArray on axis one between begin_col and end_col
* \param i loop index
* \param out_idx output csr ndarray column indices
* \param out_data output csr ndarray data
* \param out_indptr output csr ndarray row index pointer
* \param in_idx input csr ndarray column indices
* \param in_data input csr ndarray data
* \param in_indptr input csr ndarray row index pointer
* \param begin_col begin column index
* \param end_col end column index
*/
template<typename IType, typename RType, typename DType>
MSHADOW_XINLINE static void Map(int i,
IType* out_idx, DType* out_data,
const RType* out_indptr,
const IType* in_idx, const DType* in_data,
const RType* in_indptr,
const int begin_col, const int end_col) {
RType ind = out_indptr[i];
for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
// indices of CSRNDArray are in ascending order per row
if (in_idx[j] >= end_col) {
break;
} else if (in_idx[j] >= begin_col) {
out_idx[ind] = in_idx[j] - begin_col;
out_data[ind] = in_data[j];
ind++;
}
}
}
};
/*
* Slice a CSR NDArray for two dimensions
*/
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out);
template<typename xpu>
void SliceCsrImpl(const SliceParam ¶m, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs.size(), 1);
CHECK_EQ(outputs.size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
auto in_stype = inputs[0].storage_type();
if (in_stype == kCSRStorage) {
SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
} else {
LOG(FATAL) << "Slice not implemented for storage type" << in_stype;
}
}
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
common::StaticArray<index_t, ndim>* begin,
common::StaticArray<index_t, ndim>* end,
common::StaticArray<index_t, ndim>* step) {
// Function returns true if the sliced output is zero-sized (some begin == end), false otherwise.
bool zero_size_shape = false;
CHECK_NE(dshape.ndim(), 0U);
CHECK_LE(param_begin.ndim(), dshape.ndim())
<< "Slicing axis exceeds data dimensions";
CHECK_LE(param_end.ndim(), dshape.ndim())
<< "Slicing axis exceeds data dimensions";
CHECK_EQ(param_begin.ndim(), param_end.ndim())
<< "begin and end must have the same length";
CHECK_EQ(ndim, dshape.ndim())
<< "Static array size=" << ndim
<< " is not equal to data shape ndim=" << dshape.ndim();
if (param_step.ndim() > 0) {
CHECK_EQ(param_step.ndim(), param_begin.ndim())
<< "step and begin must have the same length";
}
for (int i = 0; i < param_begin.ndim(); ++i) {
index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
index_t b = 0, e = 0;
const index_t len = dshape[i];
if (len > 0) {
b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
if (b < 0) {
b += len;
}
if (e < 0 && param_end[i].has_value()) {
e += len;
}
// move the begin and end to correct position for calculating dim size
b = (b < 0 && s > 0) ? 0 : b;
b = (b > len - 1 && s < 0) ? len - 1 : b;
// if the start value leads to an empty tensor under step s, use -1 as an indicator
b = (b < 0 || b > len - 1) ? -1 : b;
e = e > -1 ? e : -1;
e = e > len ? len : e;
} else if (len == 0) {
b = 0;
e = 0;
}
(*begin)[i] = b;
(*end)[i] = e;
(*step)[i] = s;
// checking begin==end
if (b == e) {
zero_size_shape = true;
}
}
for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
(*begin)[i] = 0;
(*end)[i] = dshape[i];
(*step)[i] = 1;
}
return zero_size_shape;
}
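/* Illustrative example (hypothetical values): for dshape = (10,) with
 * begin = (-3,), end unset and step unset, the code above yields
 * b = 7, e = 10, s = 1 (indices 7,8,9); with begin/end unset and
 * step = (-2,) it yields b = 9, e = -1, s = -2 (indices 9,7,5,3,1). */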
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
const index_t i, const index_t b,
const index_t e, const index_t s,
mxnet::TShape* oshape) {
if (!mxnet::dim_size_is_known(dshape, i)) {
(*oshape)[i] = -1;
return;
}
if (e != b && b >= 0) {
if (s > 0) {
(*oshape)[i] = e > b ? (e - b - 1) / s + 1 : 0;
} else {
(*oshape)[i] = e < b ? (b - e - 1) / (-s) + 1 : 0;
}
} else {
(*oshape)[i] = 0;
}
}
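/* Illustrative example (hypothetical values): b = 1, e = 8, s = 3 gives
 * (8-1-1)/3 + 1 = 3 elements (indices 1,4,7); b = 8, e = 1, s = -3 gives
 * (8-1-1)/3 + 1 = 3 elements (indices 8,5,2). */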
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
mxnet::TShape oshape = dshape;
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
}
})
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(dshape) && shape_is_known(oshape);
}
template<int ndim, int req, typename xpu>
struct slice_forward;
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim-1];
const index_t out_last_dim_size = oshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
const index_t j = i % out_last_dim_size;
index_t irow = 0; // row id of flattened 2D data
index_t stride = 1;
index_t idx = i / out_last_dim_size;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[i], req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim-1];
const index_t out_last_dim_size = oshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
index_t out_offset = i * out_last_dim_size;
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0; // row id of flattened 2D data
index_t stride = 1;
index_t idx = i;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[out_offset++], req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
}
};
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp) return;
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
if (out.Size() == 0) return;
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
size_t num_threads = out.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= out.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
out.dptr<DType>(), data.dptr<DType>(),
data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
})
})
})
}
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
index_t offset = i * out_last_dim_size;
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0; // row id of flattened 2D out
index_t stride = 1;
index_t idx = i;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
req, val[offset++]);
}
}
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
const index_t j = i % out_last_dim_size;
index_t irow = 0; // row id of flattened 2D out
index_t stride = 1;
index_t idx = i / out_last_dim_size;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
req, val[i]);
}
};
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp) return;
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& ograd = inputs[0];
const TBlob& igrad = outputs[0];
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
if (req[0] == kWriteTo) {
Fill(s, igrad, req[0], 0);
} else if (req[0] == kWriteInplace) {
LOG(FATAL) << "_slice_backward does not support kWriteInplace";
}
if (ograd.Size() == 0) return;
MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
int num_threads = ograd.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
igrad.dptr<DType>(), ograd.dptr<DType>(),
igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
})
})
})
}
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 2U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape vshape = dshape; // vshape is the value shape on the right hand side
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
}
})
SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U); // data[index] = val, data and val are two inputs
CHECK_EQ(outputs.size(), 1U);
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& val = inputs[1];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
}
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
&begin, &end, &step);
if (zero_size_shape) {
return; // slice_assign of zero-sized subspace needs no operation.
}
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
index_t num_threads = val.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= val.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
out.dptr<DType>(), val.dptr<DType>(),
out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
})
})
})
}
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
double scalar;
mxnet::Tuple<dmlc::optional<index_t>> begin, end;
mxnet::Tuple<dmlc::optional<index_t>> step;
DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
DMLC_DECLARE_FIELD(scalar)
.set_default(0)
.describe("The scalar value for assignment.");
DMLC_DECLARE_FIELD(begin)
.describe("starting indices for the slice operation, supports negative indices.");
DMLC_DECLARE_FIELD(end)
.describe("ending indices for the slice operation, supports negative indices.");
DMLC_DECLARE_FIELD(step)
.set_default(mxnet::Tuple<dmlc::optional<index_t>>())
.describe("step for the slice operation, supports negative values.");
}
};
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!shape_is_known(dshape)) return false;
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
template<int ndim>
struct slice_assign_scalar {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
const OpReqType req,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0; // row id of flattened 2D out
index_t stride = 1;
index_t idx = i;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
}
}
};
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
using namespace mshadow;
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
}
mxnet::TShape vshape = data.shape_;
const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
&begin, &end, &step);
if (zero_size_shape) {
return; // slice_assign of zero-sized subspace needs no operation.
}
for (index_t i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
}
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
})
})
}
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
int axis;
index_t begin;
dmlc::optional<index_t> end;
DMLC_DECLARE_PARAMETER(SliceAxisParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Axis along which to be sliced, supports negative indexes.");
DMLC_DECLARE_FIELD(begin)
.describe("The beginning index along the axis to be sliced, "
" supports negative indexes.");
DMLC_DECLARE_FIELD(end)
.describe("The ending index along the axis to be sliced, "
" supports negative indexes.");
}
};
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
int* axis, index_t* begin, index_t* end) {
*axis = param.axis;
if (*axis < 0) {
*axis += ishape.ndim();
}
CHECK(*axis < ishape.ndim() && *axis >= 0) <<
"Transformed axis must be smaller than the source ndim and larger than zero! Recieved axis=" <<
param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
index_t axis_size = static_cast<index_t>(ishape[*axis]);
*begin = param.begin;
*end = -1;
if (*begin < 0) {
*begin += axis_size;
}
if (axis_size > 0) {
if (!static_cast<bool>(param.end)) {
*end = axis_size;
} else {
*end = param.end.value();
if (*end < 0) {
*end += axis_size;
}
}
CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
CHECK((*begin < *end))
<< "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
} else {
*begin = 0;
*end = 0;
}
CHECK(*end >= 0)
<< "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
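/* Illustrative example (hypothetical values): for ishape = (5,) and
 * param.axis = -1, the axis normalizes to 0; begin = -3 becomes 2 and an
 * unset end defaults to axis_size = 5, selecting indices 2,3,4. */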
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(ishape)) return false;
int axis;
index_t begin, end;
GetSliceAxisParams(param, ishape, &axis, &begin, &end);
if (!mxnet::dim_size_is_known(ishape, axis)) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return false;
}
mxnet::TShape shape(ishape.ndim(), -1);
for (int i = 0; i < ishape.ndim(); ++i) {
if (i == axis) {
shape[i] = static_cast<index_t>(end - begin);
} else {
shape[i] = ishape[i];
}
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
return shape_is_known(shape);
}
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow::expr;
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
int axis;
index_t begin, end;
GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
int ndim = outputs[0].ndim();
if (axis + 1 == ndim) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 2, DType> in =
inputs[0].FlatTo2D<xpu, DType>(s);
mshadow::Tensor<xpu, 2, DType> out =
outputs[0].FlatTo2D<xpu, DType>(s);
ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
});
} else {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 3, DType> in =
inputs[0].FlatTo3D<xpu, DType>(axis, s);
mshadow::Tensor<xpu, 3, DType> out =
outputs[0].FlatTo3D<xpu, DType>(axis, s);
ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
});
}
}
// Backward pass of slice_axis over the given axis
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
if (outputs[0].shape_.Size() == 0) {
return;
}
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
using namespace mshadow::op;
using namespace mshadow::expr;
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
int axis;
index_t begin, end;
GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
int ndim = outputs[0].shape_.ndim();
if (axis + 1 == ndim) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 2, DType> ograd =
inputs[0].FlatTo2D<xpu, DType>(s);
mshadow::Tensor<xpu, 2, DType> igrad =
outputs[0].FlatTo2D<xpu, DType>(s);
if (req[0] == kAddTo) {
slice<1>(igrad, begin, end) += F<identity>(ograd);
} else if (req[0] == kWriteTo) {
igrad = 0.0f;
slice<1>(igrad, begin, end) = F<identity>(ograd);
} else {
CHECK_EQ(req[0], kNullOp);
}
});
} else {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 3, DType> ograd =
inputs[0].FlatTo3D<xpu, DType>(axis, s);
mshadow::Tensor<xpu, 3, DType> igrad =
outputs[0].FlatTo3D<xpu, DType>(axis, s);
if (req[0] == kAddTo) {
slice<1>(igrad, begin, end) += F<identity>(ograd);
} else if (req[0] == kWriteTo) {
igrad = 0.0f;
slice<1>(igrad, begin, end) = F<identity>(ograd);
} else {
CHECK_EQ(req[0], kNullOp);
}
});
}
}
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
mxnet::Tuple<int> axes;
DMLC_DECLARE_PARAMETER(SliceLikeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
.describe("List of axes on which input data will be sliced according to the "
"corresponding size of the second input. By default will slice on "
"all axes. Negative axes are supported.");
}
};
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 2U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& from_shape = (*in_attrs)[1];
if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) {
return false;
}
if (param.axes.ndim() == 0) {
CHECK_EQ(ishape.ndim(), from_shape.ndim())
<< "By default slice_axis performs slice on all axes, but ndim mismatch "
"for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
for (int i = 0; i < ishape.ndim(); ++i) {
CHECK_GE(ishape[i], from_shape[i])
<< "Slice axis " << i << " with size " << from_shape[i]
<< "exceeds limit of input with size " << ishape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
} else {
mxnet::TShape shape(ishape);
for (int i = 0; i < param.axes.ndim(); ++i) {
int axis = param.axes[i];
if (axis < 0) {
axis += ishape.ndim();
}
CHECK_GE(axis, 0)
<< "Slice axis: " << param.axes[i] << " too small";
CHECK_GT(ishape.ndim(), axis)
<< "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
CHECK_GT(from_shape.ndim(), axis)
<< "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
shape[axis] = from_shape[axis];
CHECK_GE(ishape[axis], from_shape[axis])
<< "Slice axis " << axis << " with size " << from_shape[axis]
<< "exceeds limit of input with size " << ishape[axis];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
}
return true;
}
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
const mxnet::TShape& fshape,
const mxnet::Tuple<int>& axes,
mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
mxnet::Tuple<dmlc::optional<index_t>>* param_end,
mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
if (axes.ndim() == 0) {
for (int i = 0; i < dshape.ndim(); ++i) {
pb[i] = 0;
pe[i] = fshape[i];
ps[i] = 1;
}
} else {
for (int i = 0; i < axes.ndim(); ++i) {
int axis = axes[i];
if (axis < 0) {
axis += dshape.ndim();
}
CHECK_GE(axis, 0)
<< "Slice axis: " << axes[i] << " too small";
CHECK_LT(axis, dshape.ndim())
<< "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
CHECK_LT(axis, fshape.ndim())
<< "Slice axis: " << axis << " exceeds first input: " << fshape.ndim();
pb[axis] = 0;
pe[axis] = fshape[axis];
ps[axis] = 1;
}
}
*param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
*param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
*param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
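// Illustrative example (added; values are assumed): for dshape = (4, 5, 6),
// fshape = (2, 3, 6) and axes = (0, 1), SliceLikeInferRanges yields
// begin = (0, 0, None), end = (2, 3, None), step = (1, 1, None);
// only the listed axes are sliced, axis 2 is left untouched.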
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
using namespace mshadow::expr;
const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
const mxnet::TShape& ishape = data.shape_;
const mxnet::TShape& from_shape = inputs[1].shape_;
mxnet::Tuple<dmlc::optional<index_t>> param_begin;
mxnet::Tuple<dmlc::optional<index_t>> param_end;
mxnet::Tuple<dmlc::optional<index_t>> param_step;
SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
int num_threads = out.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= out.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
num_threads, out.dptr<DType>(), data.dptr<DType>(),
data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
})
})
})
}
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 2U);
CHECK_EQ(req.size(), 2U);
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
if (req[1] != kNullOp && req[1] != kAddTo) {
Fill(s, outputs[1], req[1], 0); // Second input not relevant to gradients.
}
if (req[0] == kNullOp) return;
const TBlob& ograd = inputs[0];
const TBlob& igrad = outputs[0];
const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
if (req[0] == kWriteTo) {
Fill(s, igrad, req[0], 0);
} else if (req[0] == kWriteInplace) {
LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
}
const mxnet::TShape& ishape = ograd.shape_;
const mxnet::TShape& from_shape = outputs[1].shape_;
mxnet::Tuple<dmlc::optional<index_t>> param_begin;
mxnet::Tuple<dmlc::optional<index_t>> param_end;
mxnet::Tuple<dmlc::optional<index_t>> param_step;
SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
int num_threads = ograd.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
igrad.dptr<DType>(), ograd.dptr<DType>(),
igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
})
})
})
}
struct ClipParam : public dmlc::Parameter<ClipParam> {
real_t a_min, a_max;
DMLC_DECLARE_PARAMETER(ClipParam) {
DMLC_DECLARE_FIELD(a_min)
.describe("Minimum value");
DMLC_DECLARE_FIELD(a_max)
.describe("Maximum value");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream a_min_s, a_max_s;
a_min_s << a_min;
a_max_s << a_max;
(*dict)["a_min"] = a_min_s.str();
(*dict)["a_max"] = a_max_s.str();
}
};
struct clip {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
const float a_min, const float a_max) {
DType data = datas[i];
if (data > a_max) {
out[i] = a_max;
} else if (data < a_min) {
out[i] = a_min;
} else {
out[i] = data;
}
}
};
struct clip_grad {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
const float a_min, const float a_max) {
DType data = datas[i];
if (data > a_max) {
out[i] = 0;
} else if (data < a_min) {
out[i] = 0;
} else {
out[i] = grad[i];
}
}
};
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
param.a_min, param.a_max);
});
}
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), param.a_min, param.a_max);
});
}
/*!
* \brief The parameters of the repeat operator include
* the number of repetitions and an optional axis.
* These parameters are later used to deduce the
* output ndarray shape in the RepeatOpShape() function.
*/
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
int repeats = 1;
dmlc::optional<int> axis;
DMLC_DECLARE_PARAMETER(RepeatParam) {
DMLC_DECLARE_FIELD(repeats)
.describe("The number of repetitions for each element.");
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>())
.describe("The axis along which to repeat values."
" The negative numbers are interpreted counting from the backward."
" By default, use the flattened input array,"
" and return a flat output array.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream repeats_s, axis_s;
repeats_s << repeats;
axis_s << axis;
(*dict)["repeats"] = repeats_s.str();
(*dict)["axis"] = axis_s.str();
}
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
int* repeats, dmlc::optional<int>* axisOpt) {
*repeats = param.repeats;
CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
*axisOpt = param.axis;
if (static_cast<bool>(*axisOpt)) {
int ndims = ishape.ndim();
int axis = axisOpt->value();
if (axis < 0) {
axis += ndims;
}
CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
}
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(ishape)) {
return false;
}
int repeats = 0;
dmlc::optional<int> axisOpt;
GetRepeatParams(param, ishape, &repeats, &axisOpt);
// If 0 repeats, return an empty 1-dim, 0-size array
if (0 == repeats) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
return true;
}
// If repeats > 0, multiply the size of the corresponding axis by repeats
if (static_cast<bool>(axisOpt)) {
int ndims = ishape.ndim();
int axis = axisOpt.value();
if (axis < 0) {
axis += ndims;
}
mxnet::TShape shape(ishape.ndim(), -1);
for (int i = 0; i < ishape.ndim(); ++i) {
if (i == axis) {
shape[i] = repeats * ishape[i];
} else {
shape[i] = ishape[i];
}
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
} else { // If axis is not input by user, return a flat 1D array of size = in.size*repeats
mxnet::TShape shape(1, ishape.Size() * repeats);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
}
return shape_is_known(out_attrs->at(0));
}
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if ((*in_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
} else if ((*out_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
}
return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator repeat.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
const mxnet::TShape& ishape,
const dmlc::optional<int>& axisOpt,
const int repeats) {
if (static_cast<bool>(axisOpt)) {
int axis = axisOpt.value();
int ndim = ishape.ndim();
if (axis < 0) {
axis += ndim;
}
CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid axis input";
// reshape the input tensor by adding a dim at the (axis+1)-th dim
mxnet::TShape rshape(ishape.ndim()+1, 1);
// the shape we want to broadcast to
mxnet::TShape bshape(rshape.ndim(), 1);
int i = 0;
while (i <= axis) {
rshape[i] = bshape[i] = ishape[i];
++i;
}
rshape[i] = 1;
bshape[i] = repeats;
while (i < ishape.ndim()) {
rshape[i+1] = ishape[i];
bshape[i+1] = ishape[i];
++i;
}
return std::make_pair(rshape, bshape);
} else {
// axis is not input by user
// reshape the tensor into shape (ishape.Size(), 1)
// then add one dim at axis = 1 and broadcast to
// shape (ishape.Size(), repeats)
mxnet::TShape rshape(2, 1);
rshape[0] = ishape.Size();
rshape[1] = 1;
mxnet::TShape bshape(2, 1);
bshape[0] = rshape[0];
bshape[1] = repeats;
return std::make_pair(rshape, bshape);
}
}
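// Illustrative example (added; values are assumed): repeating ishape = (2, 3)
// along axis = 1 with repeats = 2 yields rshape = (2, 3, 1) and
// bshape = (2, 3, 2); broadcasting rshape to bshape and collapsing the last
// two dims produces the final output shape (2, 6).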
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TBlob& iTBlob = inputs[0];
const mxnet::TShape& ishape = iTBlob.shape_;
if (!shape_is_known(ishape)) return;
int repeats = 0;
dmlc::optional<int> axisOpt;
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
GetRepeatParams(param, ishape, &repeats, &axisOpt);
if (0 == repeats) return;
std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
// reshaped input tblob
TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// reshaped output tblob
TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
const mxnet::TShape& oshape = outputs[0].shape_;
if (!shape_is_known(oshape)) return;
int repeats = 0;
dmlc::optional<int> axisOpt;
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
GetRepeatParams(param, oshape, &repeats, &axisOpt);
if (0 == repeats) return;
std::pair<mxnet::TShape, mxnet::TShape> rshapes =
ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
// reshaped output grad tblob
TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
// reshaped input grad tblob
TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
ctx, newInputs, req, newOutputs, rshapes.first);
}
struct TileParam : public dmlc::Parameter<TileParam> {
mxnet::Tuple<int> reps;
DMLC_DECLARE_PARAMETER(TileParam) {
DMLC_DECLARE_FIELD(reps)
.describe("The number of times for repeating the tensor a. Each dim size of reps"
" must be a positive integer."
" If reps has length d, the result will have dimension of max(d, a.ndim);"
" If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
" If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream reps_s;
reps_s << reps;
(*dict)["reps"] = reps_s.str();
}
};
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!shape_is_known(ishape)) {
return false;
}
const mxnet::Tuple<int>& reps = param.reps;
// If reps is empty, return an identical input array
if (reps.ndim() == 0) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return true;
}
mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = oshape.ndim() - 1; i >= 0; --i) {
if (i1 >= 0 && i2 >= 0) {
oshape[i] = ishape[i1--] * reps[i2--];
} else if (i1 >= 0) {
oshape[i] = ishape[i1--];
} else if (i2 >= 0) {
oshape[i] = reps[i2--];
}
}
// If reps contains 0s, oshape is a zero-size shape.
// Need to distinguish between np_shape mode and legacy mode.
if (!Imperative::Get()->is_np_shape()) {
common::ConvertToNumpyShape(&oshape);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(oshape);
}
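// Illustrative examples (added; values are assumed) of the shape rule above:
// ishape = (2, 3) with reps = (2,) tiles only the last axis, oshape = (2, 6);
// ishape = (3,) with reps = (2, 4) first promotes the input to 2-D,
// oshape = (2, 12).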
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if ((*in_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
} else if ((*out_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
}
return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
const mxnet::TShape& ishape,
const mxnet::Tuple<int>& reps) {
if (reps.ndim() == 0) {
return std::make_pair(ishape, ishape);
}
// The shape we want to broadcast to
mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
// The shape of the input tensor after adding new axes before each dim
mxnet::TShape rshape(bshape.ndim(), 1);
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = bshape.ndim() - 1; i >= 0; --i) {
if (0 == (i & 1)) {
bshape[i] = (i2 >= 0? reps[i2--] : 1);
rshape[i] = 1;
} else {
rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1);
}
}
return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
if (inputs[0].Size() == 0) return;
const mxnet::TShape& ishape = inputs[0].shape_;
const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
// If any of the numbers in reps is zero, return immediately
for (int i = 0; i < reps.ndim(); ++i) {
if (0 == reps[i]) return;
}
std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
// reshaped input tblob
TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// reshaped output tblob
TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
if (inputs[0].Size() == 0) return;
const mxnet::TShape& oshape = outputs[0].shape_;
const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
// If any of the numbers in reps is zero, return immediately
for (int i = 0; i < reps.ndim(); ++i) {
if (0 == reps[i]) return;
}
std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
// reshaped output grad tblob
TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
outputs[0].type_flag_, outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
// reshaped input grad tblob
TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
inputs[0].type_flag_, inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
ctx, newInputs, req, newOutputs, rshapes.first);
}
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
mxnet::Tuple<int> axis;
DMLC_DECLARE_PARAMETER(ReverseParam) {
DMLC_DECLARE_FIELD(axis)
.describe("The axis which to reverse elements.");
}
};
#define REVERSE_MAX_DIM 10U
struct reverse {
MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
index_t nreversedim,
const index_t * stride_,
const index_t * trailing_) {
index_t outputIndex = idx;
for (index_t i = 0; i < nreversedim; ++i) {
const index_t low = outputIndex % trailing_[i];
index_t high = outputIndex / trailing_[i];
const index_t x = high%stride_[i];
high /= stride_[i];
outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
}
return outputIndex;
}
#ifdef __CUDACC__
template<typename DType>
__device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
const index_t * stride_,
const index_t * trailing_) {
__shared__ index_t stride_share[REVERSE_MAX_DIM];
__shared__ index_t trailing_share[REVERSE_MAX_DIM];
if (threadIdx.x < REVERSE_MAX_DIM) {
stride_share[threadIdx.x] = stride_[threadIdx.x];
trailing_share[threadIdx.x] = trailing_[threadIdx.x];
}
__syncthreads();
index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
dst[new_idx] = src[index];
}
#else
template<typename DType>
MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
const index_t * stride_,
const index_t * trailing_) {
index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
dst[new_idx] = src[index];
}
#endif
};
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
Stream<xpu> *s = ctx.get_stream<xpu>();
const mxnet::TShape& ishape = inputs[0].shape_;
std::vector<index_t> stride_(param.axis.ndim());
std::vector<index_t> trailing_(param.axis.ndim());
index_t reverse_index = 0;
for (int axis : param.axis) {
CHECK_LT(axis, ishape.ndim());
stride_[reverse_index] = ishape[axis];
trailing_[reverse_index] = 1;
for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
trailing_[reverse_index] *= ishape[i2];
}
reverse_index++;
}
#ifdef __CUDACC__
mshadow::Tensor<xpu, 1, uint8_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
auto stride_workspace = workspace.dptr_;
auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
stride_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
trailing_.size() * sizeof(index_t),
cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
});
#else
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
stride_.data(), trailing_.data());
});
#endif
}
struct StackParam : public dmlc::Parameter<StackParam> {
int axis;
int num_args;
DMLC_DECLARE_PARAMETER(StackParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(0)
.describe("The axis in the result array along which the input arrays are stacked.");
DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
.describe("Number of inputs to be stacked.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, num_args_s;
axis_s << axis;
num_args_s << num_args;
(*dict)["axis"] = axis_s.str();
(*dict)["num_args"] = num_args_s.str();
}
};
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
mxnet::TShape dshape;
for (const mxnet::TShape& i : (*in_attrs)) {
shape_assign(&dshape, i);
}
if (!shape_is_known(dshape)) return false;
mxnet::TShape oshape(dshape.ndim() + 1, -1);
int axis = CheckAxis(param.axis, oshape.ndim());
for (int i = 0; i < axis; ++i) {
oshape[i] = dshape[i];
}
oshape[axis] = param.num_args;
for (index_t i = axis + 1; i < oshape.ndim(); ++i) {
oshape[i] = dshape[i-1];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(oshape);
}
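// Illustrative example (added; values are assumed): stacking num_args = 2
// inputs of shape (3, 4) along axis = 1 inserts a new dim of size num_args
// at that position, giving oshape = (3, 2, 4).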
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, outputs[0].ndim());
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
Tensor<xpu, 3, DType> out;
size_t leading = 1, trailing = 1;
for (int i = 0; i < axis; ++i) {
leading *= outputs[0].shape_[i];
}
for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
trailing *= outputs[0].shape_[i];
}
size_t mid = outputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < inputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
Concatenate(data, &out, 1, req[0]);
})
}
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
Tensor<xpu, 3, DType> grad;
size_t leading = 1, trailing = 1;
for (int i = 0; i < axis; ++i) {
leading *= inputs[0].shape_[i];
}
for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
trailing *= inputs[0].shape_[i];
}
size_t mid = inputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < outputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
Split(grad, &grad_in, 1, req);
})
}
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
dmlc::optional<mxnet::Tuple<int>> axis;
DMLC_DECLARE_PARAMETER(SqueezeParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<mxnet::Tuple<int>>())
.describe("Selects a subset of the single-dimensional entries in the shape."
" If an axis is selected with shape entry greater than one, an error is raised.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
// Given a shape in which squeezed dims have been marked with -1,
// move all the -1 entries to the end of the shape array
// and keep the relative order of the remaining values.
// Returns the number of dims remaining after dropping the -1 entries.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
CHECK(shape != nullptr);
size_t count = 0;
for (int i = 0; i < shape->ndim(); ++i) {
if ((*shape)[i] == -1) {
++count;
} else {
std::swap((*shape)[i], (*shape)[i-count]);
}
}
return shape->ndim() - count;
}
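// Illustrative example (added; values are assumed): for shape = (2, -1, 3, -1)
// the helper compacts the shape to (2, 3, ...) in place and returns 2,
// the number of remaining dims.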
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = in_attrs->at(0);
const int dndim = dshape.ndim();
if (!shape_is_known(dshape)) return false;
mxnet::TShape oshape = dshape;
if (param.axis.has_value()) {
// preprocess axis
mxnet::Tuple<int> axes = param.axis.value();
for (int i = 0; i < axes.ndim(); ++i) {
if (axes[i] < 0) {
axes[i] += dndim;
CHECK_GE(axes[i], 0)
<< "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
}
CHECK_LT(axes[i], dndim)
<< "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
CHECK_EQ(dshape[axes[i]], 1)
<< "cannot select an axis to squeeze out which has size="
<< dshape[axes[i]] << " not equal to one";
CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
oshape[axes[i]] = -1;
}
} else {
for (int i = 0; i < oshape.ndim(); ++i) {
if (oshape[i] == 1) oshape[i] = -1;
}
}
size_t oshape_size = SqueezeShapeHelper(&oshape);
if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1)
oshape[0] = 1;
oshape_size = 1;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
return true;
}
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
int block_size;
DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
DMLC_DECLARE_FIELD(block_size)
.describe("Blocks of [block_size. block_size] are moved");
}
};
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
mxnet::TShape expected_out(4, -1);
mxnet::TShape& in_shape = in_attrs->at(0);
if (!mxnet::ndim_is_known(in_shape)) {
return false;
}
int block = param.block_size;
CHECK_GT(block, 0) << "block_size must be a positive integer value";
CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
CHECK_EQ(in_shape[1] % (block * block), 0)
<< "Cannot perform Depth To Space operation on the specified tensor."
" Dimension:1(depth dimension) should be a multiple of 'block^2'";
CHECK_NE(in_shape[0], 0)
<< "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
CHECK_NE(in_shape[2], 0)
<< "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
CHECK_NE(in_shape[3], 0)
<< "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
expected_out[0] = in_shape[0];
expected_out[1] = in_shape[1] / (block * block);
int i = 2;
while (i < expected_out.ndim()) {
expected_out[i] = in_shape[i] * block;
++i;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
return shape_is_known(expected_out);
}
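// Illustrative example (added; values are assumed): an input of shape
// (1, 4, 2, 3) with block_size = 2 has its depth divided by block^2 and both
// spatial dims multiplied by block, giving the output shape (1, 1, 4, 6).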
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
/*!
* \brief This function updates the value of input index from where the data element
* needs to be fetched and written out to the ith location in output tensor
* \param index_position index within offset array to get offset of given dimension
* \param dim_size size of current dimension
* \param idx output tensor index
* \param inp_index index within input tensor from where value is retrieved
* \param offset_arr array containing the linear offset of input tensor
*/
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
index_t *inp_index, const index_t* offset_arr) {
index_t next_idx_val = *idx / dim_size;
*inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position];
*idx = next_idx_val;
}
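// Illustrative example (added; values are assumed): each call peels one
// mixed-radix digit off *idx. For *idx = 7 and dim_size = 3, the digit is
// 7 % 3 = 1, so *inp_index grows by 1 * offset_arr[index_position] and
// *idx becomes 7 / 3 = 2.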
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct depth_to_space_forward {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
const int block, const index_t* size, const index_t* offset_arr) {
index_t inp_index = 0, idx = i, dim_size;
dim_size = block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[3];
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2];
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1] / (block * block);
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_depth_to_space {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
const index_t size0, const index_t size1, const index_t size2,
const index_t size3) {
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * size[3];
offset_arr[3] = offset_arr[4] * size[2];
offset_arr[2] = offset_arr[3] * size[1] / (block * block);
offset_arr[1] = offset_arr[2] * block;
offset_arr[0] = offset_arr[1] * block;
}
};
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
mxnet::TShape& in_shape = in_attrs->at(0);
if (!mxnet::ndim_is_known(in_shape)) {
return false;
}
int block = param.block_size;
CHECK_GT(block, 0) << "block_size must be a positive integer value";
CHECK_NE(in_shape[0], 0)
<< "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
CHECK_NE(in_shape[2], 0)
<< "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
CHECK_EQ(in_shape[2] % block, 0)
<< "Cannot perform Depth To Space operation on the specified tensor."
" Dimension:2(1st Space dimension) should be a multiple of 'block' ";
CHECK_NE(in_shape[3], 0)
<< "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
CHECK_EQ(in_shape[3] % block, 0)
<< "Cannot perform Depth To Space operation on the specified tensor."
" Dimension:3(2nd space dimension) should be a multiple of 'block' ";
expected_out[0] = in_shape[0];
expected_out[1] = in_shape[1] * block * block;
int i = 2;
while (i < expected_out.ndim()) {
expected_out[i] = in_shape[i] / block;
++i;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
return shape_is_known(expected_out);
}
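// Illustrative example (added; values are assumed): space_to_depth inverts
// depth_to_space; an input of shape (1, 1, 4, 6) with block_size = 2 yields
// the output shape (1, 4, 2, 3).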
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct space_to_depth_forward {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
const index_t* size, const index_t* offset_arr) {
index_t inp_index = 0, idx = i, dim_size;
dim_size = size[3] / block;
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2] / block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1];
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing space_to_depth operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_space_to_depth {
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
const index_t size0, const index_t size1,
const index_t size2, const index_t size3) {
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * block;
offset_arr[3] = offset_arr[4] * size[3] / block;
offset_arr[2] = offset_arr[3] * block;
offset_arr[1] = offset_arr[2] * size[2] / block;
offset_arr[0] = offset_arr[1] * size[1];
}
};
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
in_data.shape_[2], in_data.shape_[3]);
Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
block, size, offset_arr);
});
});
}
namespace split_enum {
enum SplitOpInputs {kData};
} // namespace split_enum
struct SplitParam : public dmlc::Parameter<SplitParam> {
mxnet::TShape indices;
int axis;
bool squeeze_axis;
int sections;
DMLC_DECLARE_PARAMETER(SplitParam) {
DMLC_DECLARE_FIELD(indices)
.describe("Indices of splits. The elements should denote the boundaries of at which split"
" is performed along the `axis`.");
DMLC_DECLARE_FIELD(axis).set_default(1)
.describe("Axis along which to split.");
DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
.describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
" **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
" only along the `axis` which it is split."
" Also `squeeze_axis` can be set to ``true``"
" only if ``input.shape[axis] == num_outputs``.");
DMLC_DECLARE_FIELD(sections).set_default(0)
.describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s;
indices_s << indices;
axis_s << axis;
squeeze_axis_s << squeeze_axis;
sections_s << sections;
(*dict)["indices"] = indices_s.str();
(*dict)["axis"] = axis_s.str();
(*dict)["squeeze_axis"] = squeeze_axis_s.str();
(*dict)["sections"] = sections_s.str();
}
}; // struct SplitParam
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
mxnet::TShape indices(sections+1, -1);
indices[0] = 0;
int64_t section_size_b = (int64_t) (ishape[axis] / sections);
int64_t section_size_a = section_size_b + 1;
int section_a = ishape[axis] % sections;
for (int i = 0; i < sections; ++i) {
if ( i < section_a ) {
indices[i+1] = section_size_a * (i + 1);
} else {
indices[i+1] = section_size_b + indices[i];
}
}
return indices;
}
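// Illustrative example (added; values are assumed): splitting an axis of
// size 7 into sections = 3 yields the boundary indices (0, 3, 5, 7); the
// first 7 % 3 = 1 section receives one extra element.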
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
int dtype = (*in_attrs)[0];
CHECK_NE(dtype, -1) << "First input must have specified type";
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
out_attrs->clear();
int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
for (int i = 0; i < num_outputs; ++i) {
out_attrs->push_back(dtype);
}
return true;
}
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs,
const int real_axis) {
using namespace mshadow;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
mxnet::TShape dshape = in_attrs->at(split_enum::kData);
mxnet::TShape ishape = in_attrs->at(split_enum::kData);
const mxnet::TShape indices =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
// Pre-compute squeezed output shape for future usage
mxnet::TShape squeezed_dshape = dshape;
for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
squeezed_dshape[d] = squeezed_dshape[d+1];
}
squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
// Assign shape to every output
for (int i = 0; i < num_outputs; ++i) {
int start = indices[i];
int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
if (ishape[real_axis] == 0U) {
end = start;
} else {
CHECK(start <= end)
<< "start " << start << " is not less than end " << end << "for subarray " << i;
CHECK(end <= ishape[real_axis])
<< "end " << end << " is no less than the size of the axis " << ishape[real_axis];
}
dshape[real_axis] = (end - start);
if (param.squeeze_axis) {
CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
}
}
mxnet::TShape back_calculate_dshape = ishape;
back_calculate_dshape[real_axis] = 0;
for (int d = 0; d < real_axis; ++d) {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
if (param.squeeze_axis) {
back_calculate_dshape[real_axis] = num_outputs;
} else {
for (int i = 0; i < num_outputs; ++i) {
back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
}
}
for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
if (param.squeeze_axis) {
back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
} else {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
return true;
}
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
using namespace mshadow;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
mxnet::TShape dshape = in_attrs->at(split_enum::kData);
if (!mxnet::ndim_is_known(dshape)) return false;
if (param.axis >= 0) {
CHECK_LT(param.axis, dshape.ndim());
} else {
CHECK_GE(param.axis + dshape.ndim(), 0);
}
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += dshape.ndim();
}
return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
/*!
* \brief Map function for forward split_v2 operator
* \param i global thread id
* \param in_data ptr to input buffer
* \param out_data ptr to ptr of outputs buffer
* \param indices ptr to indices buffer
* \param num_sections # of sections after split
* \param axis_size size of the axis to be split on
* \param trailing_size step size within the data buffer of the axis to be split on
*/
template<typename DType>
static MSHADOW_XINLINE void Map(size_t i,
const DType *in_data, DType** out_data, const size_t* indices,
const size_t num_sections, const size_t axis_size,
const size_t trailing_size) {
size_t idx = i / trailing_size % axis_size;
size_t target = 0;
for (size_t section = 0;
section < num_sections && indices[section] <= idx;
target = section++) {}
DType* target_data = out_data[target];
const size_t mid_idx = idx - indices[target];
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[target + 1] - indices[target];
const size_t target_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
target_data[target_idx] = in_data[i];
}
};
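// Illustrative example (added; values are assumed) of the section search in
// SplitKernel::Map: with indices = (0, 2, 5) and num_sections = 2, an element
// at position idx = 3 along the split axis lands in section 1
// (indices[1] = 2 <= 3 < indices[2] = 5), so mid_idx = 1 and section_size = 3.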
struct ConcatenateKernel {
/*!
* \brief Map function for backward split_v2 operator
* \param i global thread id
* \param out_grad ptr to ptr of out grads buffer
* \param in_grad ptr to input grad buffer
* \param indices ptr to indices buffer
* \param num_sections # of sections after split
* \param axis_size size of the axis to be split on
* \param trailing_size step size within the data buffer of the axis to be split on
*/
template<typename DType>
static MSHADOW_XINLINE void Map(size_t i,
DType** out_grad, DType* in_grad, const size_t* indices,
const size_t num_sections, const size_t axis_size,
const size_t trailing_size) {
size_t idx = i / trailing_size % axis_size;
size_t src = 0;
for (size_t section = 0;
section < num_sections && indices[section] <= idx;
src = section++) {}
DType* src_grad = out_grad[src];
const size_t mid_idx = idx - indices[src];
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[src + 1] - indices[src];
const size_t src_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
in_grad[i] = src_grad[src_idx];
}
};
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const int real_axis) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& input_data = inputs[split_enum::kData];
size_t leading = 1, trailing = 1;
CHECK_LT(real_axis, input_data.ndim());
size_t mid = input_data.shape_[real_axis];
for (int i = 0; i < real_axis; ++i) {
leading *= input_data.shape_[i];
}
for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
trailing *= input_data.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_data.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
std::vector<DType*> output_data;
for (const TBlob& data : outputs) {
output_data.push_back(data.dptr<DType>());
}
workspace_size += output_data.size() * sizeof(DType*);
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(
reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(output_data.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
Kernel<SplitKernel, xpu>::Launch(
s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
});
}
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
const TBlob& input_data = inputs[split_enum::kData];
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += input_data.ndim();
}
SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const int real_axis) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
Stream<xpu> *s = ctx.get_stream<xpu>();
TBlob input_grad = outputs[split_enum::kData];
size_t leading = 1, trailing = 1;
CHECK_LT(real_axis, input_grad.ndim());
size_t mid = input_grad.shape_[real_axis];
for (int i = 0; i < real_axis; ++i) {
leading *= input_grad.shape_[i];
}
for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
trailing *= input_grad.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_grad.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
std::vector<DType*> out_grads;
for (const TBlob& output_grad : inputs) {
out_grads.push_back(output_grad.dptr<DType>());
}
workspace_size += out_grads.size() * sizeof(DType*);
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(
reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(inputs.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
Kernel<ConcatenateKernel, xpu>::Launch(
s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
});
}
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
<< "out grad vector size mush match the output size";
CHECK_EQ(outputs.size(), 1U);
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += outputs[split_enum::kData].ndim();
}
SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
return (param.sections > 0) ? param.sections : param.indices.ndim();
}
} // namespace op
} // namespace mxnet
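// std::hash specializations for the parameter structs above, so they can be
// used as keys in unordered_map-based caches (a matching operator== is
// assumed to be defined elsewhere).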
namespace std {
template<>
struct hash<mxnet::op::TransposeParam> {
size_t operator()(const mxnet::op::TransposeParam& val) const {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.axes);
return ret;
}
};
template<>
struct hash<mxnet::op::ReshapeParam> {
size_t operator()(const mxnet::op::ReshapeParam& val) const {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.target_shape);
ret = dmlc::HashCombine(ret, val.keep_highest);
ret = dmlc::HashCombine(ret, val.shape);
ret = dmlc::HashCombine(ret, val.reverse);
return ret;
}
};
template<>
struct hash<mxnet::op::ExpandDimParam> {
size_t operator()(const mxnet::op::ExpandDimParam& val) const {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.axis);
return ret;
}
};
} // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
_phonopy.c | /* Copyright (C) 2011 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <Python.h>
#include <stdio.h>
#include <stddef.h>
#include <math.h>
#include <float.h>
#include <numpy/arrayobject.h>
#include "phonopy.h"
/* PHPYCONST is defined in dynmat.h */
/* Build dynamical matrix */
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args);
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args);
static PyObject *
py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args);
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args);
static PyObject * py_get_recip_dipole_dipole(PyObject *self, PyObject *args);
static PyObject * py_get_recip_dipole_dipole_q0(PyObject *self, PyObject *args);
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args);
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args);
static PyObject * py_distribute_fc2(PyObject *self, PyObject *args);
static PyObject * py_compute_permutation(PyObject *self, PyObject *args);
static PyObject *
py_gsv_set_smallest_vectors_sparse(PyObject *self, PyObject *args);
static PyObject *
py_gsv_set_smallest_vectors_dense(PyObject *self, PyObject *args);
static PyObject *
py_thm_relative_grid_address(PyObject *self, PyObject *args);
static PyObject *
py_thm_all_relative_grid_address(PyObject *self, PyObject *args);
static PyObject *
py_thm_integration_weight(PyObject *self, PyObject *args);
static PyObject *
py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args);
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args);
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args);
struct module_state {
PyObject *error;
};
#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif
static PyObject *
error_out(PyObject *m) {
struct module_state *st = GETSTATE(m);
PyErr_SetString(st->error, "something bad happened");
return NULL;
}
static PyMethodDef _phonopy_methods[] = {
{"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
{"transform_dynmat_to_fc", py_transform_dynmat_to_fc, METH_VARARGS,
"Transform a set of dynmat to force constants"},
{"perm_trans_symmetrize_fc", py_perm_trans_symmetrize_fc, METH_VARARGS,
"Enforce permutation and translational symmetry of force constants"},
{"perm_trans_symmetrize_compact_fc", py_perm_trans_symmetrize_compact_fc,
METH_VARARGS,
"Enforce permutation and translational symmetry of compact force constants"},
{"transpose_compact_fc", py_transpose_compact_fc,
METH_VARARGS,
"Transpose compact force constants"},
{"dynamical_matrix", py_get_dynamical_matrix, METH_VARARGS,
"Dynamical matrix"},
{"nac_dynamical_matrix", py_get_nac_dynamical_matrix, METH_VARARGS,
"NAC dynamical matrix"},
{"recip_dipole_dipole", py_get_recip_dipole_dipole, METH_VARARGS,
"Reciprocal part of dipole-dipole interaction"},
{"recip_dipole_dipole_q0", py_get_recip_dipole_dipole_q0, METH_VARARGS,
"q=0 terms of reciprocal part of dipole-dipole interaction"},
{"derivative_dynmat", py_get_derivative_dynmat, METH_VARARGS,
"Q derivative of dynamical matrix"},
{"thermal_properties", py_get_thermal_properties, METH_VARARGS,
"Thermal properties"},
{"distribute_fc2", py_distribute_fc2, METH_VARARGS,
"Distribute force constants for all atoms in atom_list using precomputed symmetry mappings."},
{"compute_permutation", py_compute_permutation, METH_VARARGS,
"Compute indices of original points in a set of rotated points."},
{"gsv_set_smallest_vectors_sparse", py_gsv_set_smallest_vectors_sparse,
METH_VARARGS, "Set shortest vectors in sparse array."},
{"gsv_set_smallest_vectors_dense", py_gsv_set_smallest_vectors_dense,
METH_VARARGS, "Set shortest vectors in dense array."},
{"tetrahedra_relative_grid_address", py_thm_relative_grid_address,
METH_VARARGS, "Relative grid addresses of vertices of 24 tetrahedra"},
{"all_tetrahedra_relative_grid_address",
py_thm_all_relative_grid_address, METH_VARARGS,
"4 (all) sets of relative grid addresses of vertices of 24 tetrahedra"},
{"tetrahedra_integration_weight", py_thm_integration_weight,
METH_VARARGS, "Integration weight for tetrahedron method"},
{"tetrahedra_integration_weight_at_omegas",
py_thm_integration_weight_at_omegas,
METH_VARARGS, "Integration weight for tetrahedron method at omegas"},
{"tetrahedra_frequencies", py_get_tetrahedra_frequenies,
METH_VARARGS, "Run tetrahedron method"},
{"tetrahedron_method_dos", py_tetrahedron_method_dos,
METH_VARARGS, "Run tetrahedron method"},
{NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION >= 3
static int _phonopy_traverse(PyObject *m, visitproc visit, void *arg) {
Py_VISIT(GETSTATE(m)->error);
return 0;
}
static int _phonopy_clear(PyObject *m) {
Py_CLEAR(GETSTATE(m)->error);
return 0;
}
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_phonopy",
NULL,
sizeof(struct module_state),
_phonopy_methods,
NULL,
_phonopy_traverse,
_phonopy_clear,
NULL
};
#define INITERROR return NULL
PyObject *
PyInit__phonopy(void)
#else
#define INITERROR return
void
init_phonopy(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module = PyModule_Create(&moduledef);
#else
PyObject *module = Py_InitModule("_phonopy", _phonopy_methods);
#endif
struct module_state *st;
if (module == NULL)
INITERROR;
st = GETSTATE(module);
st->error = PyErr_NewException("_phonopy.Error", NULL, NULL);
if (st->error == NULL) {
Py_DECREF(module);
INITERROR;
}
#if PY_MAJOR_VERSION >= 3
return module;
#endif
}
static PyObject * py_transform_dynmat_to_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_force_constants;
PyArrayObject* py_dynamical_matrices;
PyArrayObject* py_commensurate_points;
PyArrayObject* py_shortest_vectors;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_fc_index_map;
double* fc;
double* dm;
double (*comm_points)[3];
double (*shortest_vectors)[27][3];
double* masses;
int* multiplicities;
int* s2pp_map;
int* fc_index_map;
int num_patom;
int num_satom;
if (!PyArg_ParseTuple(args, "OOOOOOOO",
&py_force_constants,
&py_dynamical_matrices,
&py_commensurate_points,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_s2pp_map,
&py_fc_index_map)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_force_constants);
dm = (double*)PyArray_DATA(py_dynamical_matrices);
comm_points = (double(*)[3])PyArray_DATA(py_commensurate_points);
shortest_vectors = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
masses = (double*)PyArray_DATA(py_masses);
multiplicities = (int*)PyArray_DATA(py_multiplicities);
s2pp_map = (int*)PyArray_DATA(py_s2pp_map);
fc_index_map = (int*)PyArray_DATA(py_fc_index_map);
num_patom = PyArray_DIMS(py_multiplicities)[1];
num_satom = PyArray_DIMS(py_multiplicities)[0];
phpy_transform_dynmat_to_fc(fc,
dm,
comm_points,
shortest_vectors,
multiplicities,
masses,
s2pp_map,
fc_index_map,
num_patom,
num_satom);
Py_RETURN_NONE;
}
static PyObject * py_compute_permutation(PyObject *self, PyObject *args)
{
PyArrayObject* permutation;
PyArrayObject* lattice;
PyArrayObject* positions;
PyArrayObject* permuted_positions;
double symprec;
int* rot_atoms;
double (*lat)[3];
double (*pos)[3];
double (*rot_pos)[3];
int num_pos;
int is_found;
if (!PyArg_ParseTuple(args, "OOOOd",
&permutation,
&lattice,
&positions,
&permuted_positions,
&symprec)) {
return NULL;
}
rot_atoms = (int*)PyArray_DATA(permutation);
lat = (double(*)[3])PyArray_DATA(lattice);
pos = (double(*)[3])PyArray_DATA(positions);
rot_pos = (double(*)[3])PyArray_DATA(permuted_positions);
num_pos = PyArray_DIMS(positions)[0];
is_found = phpy_compute_permutation(rot_atoms,
lat,
pos,
rot_pos,
num_pos,
symprec);
if (is_found) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
}
static PyObject *
py_gsv_set_smallest_vectors_sparse(PyObject *self, PyObject *args)
{
PyArrayObject* py_smallest_vectors;
PyArrayObject* py_multiplicity;
PyArrayObject* py_pos_to;
PyArrayObject* py_pos_from;
PyArrayObject* py_lattice_points;
PyArrayObject* py_reduced_basis;
PyArrayObject* py_trans_mat;
double symprec;
double (*smallest_vectors)[27][3];
int * multiplicity;
double (*pos_to)[3];
double (*pos_from)[3];
int (*lattice_points)[3];
double (*reduced_basis)[3];
int (*trans_mat)[3];
int num_pos_to, num_pos_from, num_lattice_points;
if (!PyArg_ParseTuple(args, "OOOOOOOd",
&py_smallest_vectors,
&py_multiplicity,
&py_pos_to,
&py_pos_from,
&py_lattice_points,
&py_reduced_basis,
&py_trans_mat,
&symprec)) {
return NULL;
}
smallest_vectors = (double(*)[27][3])PyArray_DATA(py_smallest_vectors);
multiplicity = (int*)PyArray_DATA(py_multiplicity);
pos_to = (double(*)[3])PyArray_DATA(py_pos_to);
pos_from = (double(*)[3])PyArray_DATA(py_pos_from);
num_pos_to = PyArray_DIMS(py_pos_to)[0];
num_pos_from = PyArray_DIMS(py_pos_from)[0];
lattice_points = (int(*)[3])PyArray_DATA(py_lattice_points);
num_lattice_points = PyArray_DIMS(py_lattice_points)[0];
reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis);
trans_mat = (int(*)[3])PyArray_DATA(py_trans_mat);
phpy_set_smallest_vectors_sparse(smallest_vectors,
multiplicity,
pos_to,
num_pos_to,
pos_from,
num_pos_from,
lattice_points,
num_lattice_points,
reduced_basis,
trans_mat,
symprec);
Py_RETURN_NONE;
}
static PyObject *
py_gsv_set_smallest_vectors_dense(PyObject *self, PyObject *args)
{
PyArrayObject* py_smallest_vectors;
PyArrayObject* py_multiplicity;
PyArrayObject* py_pos_to;
PyArrayObject* py_pos_from;
PyArrayObject* py_lattice_points;
PyArrayObject* py_reduced_basis;
PyArrayObject* py_trans_mat;
long initialize;
double symprec;
double (*smallest_vectors)[3];
long (*multiplicity)[2];
double (*pos_to)[3];
double (*pos_from)[3];
long (*lattice_points)[3];
double (*reduced_basis)[3];
long (*trans_mat)[3];
long num_pos_to, num_pos_from, num_lattice_points;
if (!PyArg_ParseTuple(args, "OOOOOOOld",
&py_smallest_vectors,
&py_multiplicity,
&py_pos_to,
&py_pos_from,
&py_lattice_points,
&py_reduced_basis,
&py_trans_mat,
&initialize,
&symprec)) {
return NULL;
}
smallest_vectors = (double(*)[3])PyArray_DATA(py_smallest_vectors);
multiplicity = (long(*)[2])PyArray_DATA(py_multiplicity);
pos_to = (double(*)[3])PyArray_DATA(py_pos_to);
pos_from = (double(*)[3])PyArray_DATA(py_pos_from);
num_pos_to = PyArray_DIMS(py_pos_to)[0];
num_pos_from = PyArray_DIMS(py_pos_from)[0];
lattice_points = (long(*)[3])PyArray_DATA(py_lattice_points);
num_lattice_points = PyArray_DIMS(py_lattice_points)[0];
reduced_basis = (double(*)[3])PyArray_DATA(py_reduced_basis);
trans_mat = (long(*)[3])PyArray_DATA(py_trans_mat);
phpy_set_smallest_vectors_dense(smallest_vectors,
multiplicity,
pos_to,
num_pos_to,
pos_from,
num_pos_from,
lattice_points,
num_lattice_points,
reduced_basis,
trans_mat,
initialize,
symprec);
Py_RETURN_NONE;
}
static PyObject * py_perm_trans_symmetrize_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc;
double *fc;
int level;
int n_satom;
if (!PyArg_ParseTuple(args, "Oi", &py_fc, &level)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_fc);
n_satom = PyArray_DIMS(py_fc)[0];
phpy_perm_trans_symmetrize_fc(fc, n_satom, level);
Py_RETURN_NONE;
}
static PyObject *
py_perm_trans_symmetrize_compact_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc;
PyArrayObject* py_permutations;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_nsym_list;
int level;
double *fc;
int *perms;
int *s2pp;
int *p2s;
int *nsym_list;
int n_patom, n_satom;
if (!PyArg_ParseTuple(args, "OOOOOi",
&py_fc,
&py_permutations,
&py_s2pp_map,
&py_p2s_map,
&py_nsym_list,
&level)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_fc);
perms = (int*)PyArray_DATA(py_permutations);
s2pp = (int*)PyArray_DATA(py_s2pp_map);
p2s = (int*)PyArray_DATA(py_p2s_map);
nsym_list = (int*)PyArray_DATA(py_nsym_list);
n_patom = PyArray_DIMS(py_fc)[0];
n_satom = PyArray_DIMS(py_fc)[1];
phpy_perm_trans_symmetrize_compact_fc(
fc, p2s, s2pp, nsym_list, perms, n_satom, n_patom, level);
Py_RETURN_NONE;
}
static PyObject * py_transpose_compact_fc(PyObject *self, PyObject *args)
{
PyArrayObject* py_fc;
PyArrayObject* py_permutations;
PyArrayObject* py_s2pp_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_nsym_list;
double *fc;
int *s2pp;
int *p2s;
int *nsym_list;
int *perms;
int n_patom, n_satom;
if (!PyArg_ParseTuple(args, "OOOOO",
&py_fc,
&py_permutations,
&py_s2pp_map,
&py_p2s_map,
&py_nsym_list)) {
return NULL;
}
fc = (double*)PyArray_DATA(py_fc);
perms = (int*)PyArray_DATA(py_permutations);
s2pp = (int*)PyArray_DATA(py_s2pp_map);
p2s = (int*)PyArray_DATA(py_p2s_map);
nsym_list = (int*)PyArray_DATA(py_nsym_list);
n_patom = PyArray_DIMS(py_fc)[0];
n_satom = PyArray_DIMS(py_fc)[1];
phpy_set_index_permutation_symmetry_compact_fc(fc,
p2s,
s2pp,
nsym_list,
perms,
n_satom,
n_patom,
1);
Py_RETURN_NONE;
}
static PyObject * py_get_dynamical_matrix(PyObject *self, PyObject *args)
{
PyArrayObject* py_dynamical_matrix;
PyArrayObject* py_force_constants;
PyArrayObject* py_shortest_vectors;
PyArrayObject* py_q;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2p_map;
PyArrayObject* py_p2s_map;
double* dm;
double* fc;
double* q;
double (*svecs)[27][3];
double* m;
int* multi;
int* s2p_map;
int* p2s_map;
int num_patom;
int num_satom;
if (!PyArg_ParseTuple(args, "OOOOOOOO",
&py_dynamical_matrix,
&py_force_constants,
&py_q,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_s2p_map,
&py_p2s_map)) {
return NULL;
}
dm = (double*)PyArray_DATA(py_dynamical_matrix);
fc = (double*)PyArray_DATA(py_force_constants);
q = (double*)PyArray_DATA(py_q);
svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
m = (double*)PyArray_DATA(py_masses);
multi = (int*)PyArray_DATA(py_multiplicities);
s2p_map = (int*)PyArray_DATA(py_s2p_map);
p2s_map = (int*)PyArray_DATA(py_p2s_map);
num_patom = PyArray_DIMS(py_p2s_map)[0];
num_satom = PyArray_DIMS(py_s2p_map)[0];
phpy_get_dynamical_matrix_at_q(dm,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
m,
s2p_map,
p2s_map,
NULL,
1);
Py_RETURN_NONE;
}
static PyObject * py_get_nac_dynamical_matrix(PyObject *self, PyObject *args)
{
PyArrayObject* py_dynamical_matrix;
PyArrayObject* py_force_constants;
PyArrayObject* py_shortest_vectors;
PyArrayObject* py_q_cart;
PyArrayObject* py_q;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2p_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_born;
double factor;
double* dm;
double* fc;
double* q_cart;
double* q;
double (*svecs)[27][3];
double* m;
double (*born)[3][3];
int* multi;
int* s2p_map;
int* p2s_map;
int num_patom;
int num_satom;
int n;
double (*charge_sum)[3][3];
if (!PyArg_ParseTuple(args, "OOOOOOOOOOd",
&py_dynamical_matrix,
&py_force_constants,
&py_q,
&py_shortest_vectors,
&py_multiplicities,
&py_masses,
&py_s2p_map,
&py_p2s_map,
&py_q_cart,
&py_born,
&factor))
return NULL;
dm = (double*)PyArray_DATA(py_dynamical_matrix);
fc = (double*)PyArray_DATA(py_force_constants);
q_cart = (double*)PyArray_DATA(py_q_cart);
q = (double*)PyArray_DATA(py_q);
svecs = (double(*)[27][3])PyArray_DATA(py_shortest_vectors);
m = (double*)PyArray_DATA(py_masses);
born = (double(*)[3][3])PyArray_DATA(py_born);
multi = (int*)PyArray_DATA(py_multiplicities);
s2p_map = (int*)PyArray_DATA(py_s2p_map);
p2s_map = (int*)PyArray_DATA(py_p2s_map);
num_patom = PyArray_DIMS(py_p2s_map)[0];
num_satom = PyArray_DIMS(py_s2p_map)[0];
charge_sum = (double(*)[3][3])
malloc(sizeof(double[3][3]) * num_patom * num_patom);
n = num_satom / num_patom;
phpy_get_charge_sum(charge_sum, num_patom, factor / n, q_cart, born);
phpy_get_dynamical_matrix_at_q(dm,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
m,
s2p_map,
p2s_map,
charge_sum,
1);
free(charge_sum);
Py_RETURN_NONE;
}
static PyObject * py_get_recip_dipole_dipole(PyObject *self, PyObject *args)
{
PyArrayObject* py_dd;
PyArrayObject* py_dd_q0;
PyArrayObject* py_G_list;
PyArrayObject* py_q_cart;
PyArrayObject* py_q_direction;
PyArrayObject* py_born;
PyArrayObject* py_dielectric;
PyArrayObject* py_positions;
double factor;
double lambda;
double tolerance;
double* dd;
double* dd_q0;
double (*G_list)[3];
double* q_vector;
double* q_direction;
double (*born)[3][3];
double (*dielectric)[3];
double (*pos)[3];
int num_patom, num_G;
if (!PyArg_ParseTuple(args, "OOOOOOOOddd",
&py_dd,
&py_dd_q0,
&py_G_list,
&py_q_cart,
&py_q_direction,
&py_born,
&py_dielectric,
&py_positions,
&factor,
&lambda,
&tolerance))
return NULL;
dd = (double*)PyArray_DATA(py_dd);
dd_q0 = (double*)PyArray_DATA(py_dd_q0);
G_list = (double(*)[3])PyArray_DATA(py_G_list);
if ((PyObject*)py_q_direction == Py_None) {
q_direction = NULL;
} else {
q_direction = (double*)PyArray_DATA(py_q_direction);
}
q_vector = (double*)PyArray_DATA(py_q_cart);
born = (double(*)[3][3])PyArray_DATA(py_born);
dielectric = (double(*)[3])PyArray_DATA(py_dielectric);
pos = (double(*)[3])PyArray_DATA(py_positions);
num_G = PyArray_DIMS(py_G_list)[0];
num_patom = PyArray_DIMS(py_positions)[0];
phpy_get_recip_dipole_dipole(dd, /* [natom, 3, natom, 3, (real, imag)] */
dd_q0, /* [natom, 3, 3, (real, imag)] */
G_list, /* [num_kvec, 3] */
num_G,
num_patom,
q_vector,
q_direction,
born,
dielectric,
pos, /* [natom, 3] */
factor, /* 4pi/V*unit-conv */
lambda, /* 4 * Lambda^2 */
tolerance);
Py_RETURN_NONE;
}
static PyObject * py_get_recip_dipole_dipole_q0(PyObject *self, PyObject *args)
{
PyArrayObject* py_dd_q0;
PyArrayObject* py_G_list;
PyArrayObject* py_born;
PyArrayObject* py_dielectric;
PyArrayObject* py_positions;
double lambda;
double tolerance;
double* dd_q0;
double (*G_list)[3];
double (*born)[3][3];
double (*dielectric)[3];
double (*pos)[3];
int num_patom, num_G;
if (!PyArg_ParseTuple(args, "OOOOOdd",
&py_dd_q0,
&py_G_list,
&py_born,
&py_dielectric,
&py_positions,
&lambda,
&tolerance))
return NULL;
dd_q0 = (double*)PyArray_DATA(py_dd_q0);
G_list = (double(*)[3])PyArray_DATA(py_G_list);
born = (double(*)[3][3])PyArray_DATA(py_born);
dielectric = (double(*)[3])PyArray_DATA(py_dielectric);
pos = (double(*)[3])PyArray_DATA(py_positions);
num_G = PyArray_DIMS(py_G_list)[0];
num_patom = PyArray_DIMS(py_positions)[0];
phpy_get_recip_dipole_dipole_q0(dd_q0, /* [natom, 3, 3, (real, imag)] */
G_list, /* [num_kvec, 3] */
num_G,
num_patom,
born,
dielectric,
pos, /* [natom, 3] */
lambda, /* 4 * Lambda^2 */
tolerance);
Py_RETURN_NONE;
}
static PyObject * py_get_derivative_dynmat(PyObject *self, PyObject *args)
{
PyArrayObject* derivative_dynmat;
PyArrayObject* py_force_constants;
PyArrayObject* r_vector;
PyArrayObject* lattice;
PyArrayObject* q_vector;
PyArrayObject* py_multiplicities;
PyArrayObject* py_masses;
PyArrayObject* py_s2p_map;
PyArrayObject* py_p2s_map;
PyArrayObject* py_born;
PyArrayObject* dielectric;
PyArrayObject* q_direction;
double nac_factor;
double* ddm;
double* fc;
double* q;
double* lat;
double* r;
double* m;
int* multi;
int* s2p_map;
int* p2s_map;
int num_patom;
int num_satom;
double *z;
double *epsilon;
double *q_dir;
if (!PyArg_ParseTuple(args, "OOOOOOOOOdOOO",
&derivative_dynmat,
&py_force_constants,
&q_vector,
&lattice, /* column vectors */
&r_vector,
&py_multiplicities,
&py_masses,
&py_s2p_map,
&py_p2s_map,
&nac_factor,
&py_born,
&dielectric,
&q_direction)) {
return NULL;
}
ddm = (double*)PyArray_DATA(derivative_dynmat);
fc = (double*)PyArray_DATA(py_force_constants);
q = (double*)PyArray_DATA(q_vector);
lat = (double*)PyArray_DATA(lattice);
r = (double*)PyArray_DATA(r_vector);
m = (double*)PyArray_DATA(py_masses);
multi = (int*)PyArray_DATA(py_multiplicities);
s2p_map = (int*)PyArray_DATA(py_s2p_map);
p2s_map = (int*)PyArray_DATA(py_p2s_map);
num_patom = PyArray_DIMS(py_p2s_map)[0];
num_satom = PyArray_DIMS(py_s2p_map)[0];
if ((PyObject*)py_born == Py_None) {
z = NULL;
} else {
z = (double*)PyArray_DATA(py_born);
}
if ((PyObject*)dielectric == Py_None) {
epsilon = NULL;
} else {
epsilon = (double*)PyArray_DATA(dielectric);
}
if ((PyObject*)q_direction == Py_None) {
q_dir = NULL;
} else {
q_dir = (double*)PyArray_DATA(q_direction);
}
phpy_get_derivative_dynmat_at_q(ddm,
num_patom,
num_satom,
fc,
q,
lat,
r,
multi,
m,
s2p_map,
p2s_map,
nac_factor,
z,
epsilon,
q_dir);
Py_RETURN_NONE;
}
/* Thermal properties */
static PyObject * py_get_thermal_properties(PyObject *self, PyObject *args)
{
PyArrayObject* py_thermal_props;
PyArrayObject* py_temperatures;
PyArrayObject* py_frequencies;
PyArrayObject* py_weights;
double cutoff_frequency;
double *temperatures;
double* freqs;
double *thermal_props;
long* weights;
long num_qpoints;
long num_bands;
long num_temp;
if (!PyArg_ParseTuple(args, "OOOOd",
&py_thermal_props,
&py_temperatures,
&py_frequencies,
&py_weights,
&cutoff_frequency)) {
return NULL;
}
thermal_props = (double*)PyArray_DATA(py_thermal_props);
temperatures = (double*)PyArray_DATA(py_temperatures);
num_temp = (long)PyArray_DIMS(py_temperatures)[0];
freqs = (double*)PyArray_DATA(py_frequencies);
num_qpoints = (long)PyArray_DIMS(py_frequencies)[0];
weights = (long*)PyArray_DATA(py_weights);
num_bands = (long)PyArray_DIMS(py_frequencies)[1];
phpy_get_thermal_properties(thermal_props,
temperatures,
freqs,
weights,
num_temp,
num_qpoints,
num_bands,
cutoff_frequency);
Py_RETURN_NONE;
}
static PyObject * py_distribute_fc2(PyObject *self, PyObject *args)
{
PyArrayObject* py_force_constants;
PyArrayObject* py_permutations;
PyArrayObject* py_map_atoms;
PyArrayObject* py_map_syms;
PyArrayObject* py_atom_list;
PyArrayObject* py_rotations_cart;
double (*r_carts)[3][3];
double (*fc2)[3][3];
int *permutations;
int *map_atoms;
int *map_syms;
int *atom_list;
npy_intp num_pos, num_rot, len_atom_list;
if (!PyArg_ParseTuple(args, "OOOOOO",
&py_force_constants,
&py_atom_list,
&py_rotations_cart,
&py_permutations,
&py_map_atoms,
&py_map_syms)) {
return NULL;
}
fc2 = (double(*)[3][3])PyArray_DATA(py_force_constants);
atom_list = (int*)PyArray_DATA(py_atom_list);
len_atom_list = PyArray_DIMS(py_atom_list)[0];
permutations = (int*)PyArray_DATA(py_permutations);
map_atoms = (int*)PyArray_DATA(py_map_atoms);
map_syms = (int*)PyArray_DATA(py_map_syms);
r_carts = (double(*)[3][3])PyArray_DATA(py_rotations_cart);
num_rot = PyArray_DIMS(py_permutations)[0];
num_pos = PyArray_DIMS(py_permutations)[1];
if (PyArray_NDIM(py_map_atoms) != 1 || PyArray_DIMS(py_map_atoms)[0] != num_pos)
{
PyErr_SetString(PyExc_ValueError, "wrong shape for map_atoms");
return NULL;
}
if (PyArray_NDIM(py_map_syms) != 1 || PyArray_DIMS(py_map_syms)[0] != num_pos)
{
PyErr_SetString(PyExc_ValueError, "wrong shape for map_syms");
return NULL;
}
if (PyArray_DIMS(py_rotations_cart)[0] != num_rot)
{
PyErr_SetString(PyExc_ValueError, "permutations and rotations are different length");
return NULL;
}
phpy_distribute_fc2(fc2,
atom_list,
len_atom_list,
r_carts,
permutations,
map_atoms,
map_syms,
num_rot,
num_pos);
Py_RETURN_NONE;
}
static PyObject *
py_thm_relative_grid_address(PyObject *self, PyObject *args)
{
PyArrayObject* py_relative_grid_address;
PyArrayObject* py_reciprocal_lattice_py;
long (*relative_grid_address)[4][3];
double (*reciprocal_lattice)[3];
if (!PyArg_ParseTuple(args, "OO",
&py_relative_grid_address,
&py_reciprocal_lattice_py)) {
return NULL;
}
relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address);
reciprocal_lattice = (double(*)[3])PyArray_DATA(py_reciprocal_lattice_py);
phpy_get_relative_grid_address(relative_grid_address, reciprocal_lattice);
Py_RETURN_NONE;
}
static PyObject *
py_thm_all_relative_grid_address(PyObject *self, PyObject *args)
{
PyArrayObject* py_relative_grid_address;
long (*relative_grid_address)[24][4][3];
if (!PyArg_ParseTuple(args, "O",
&py_relative_grid_address)) {
return NULL;
}
relative_grid_address =
(long(*)[24][4][3])PyArray_DATA(py_relative_grid_address);
phpy_get_all_relative_grid_address(relative_grid_address);
Py_RETURN_NONE;
}
static PyObject *
py_thm_integration_weight(PyObject *self, PyObject *args)
{
double omega;
PyArrayObject* py_tetrahedra_omegas;
char* function;
double (*tetrahedra_omegas)[4];
double iw;
if (!PyArg_ParseTuple(args, "dOs",
&omega,
&py_tetrahedra_omegas,
&function)) {
return NULL;
}
tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);
iw = phpy_get_integration_weight(omega,
tetrahedra_omegas,
function[0]);
return PyFloat_FromDouble(iw);
}
static PyObject *
py_thm_integration_weight_at_omegas(PyObject *self, PyObject *args)
{
PyArrayObject* py_integration_weights;
PyArrayObject* py_omegas;
PyArrayObject* py_tetrahedra_omegas;
char* function;
double *omegas;
double *iw;
long num_omegas;
double (*tetrahedra_omegas)[4];
long i;
if (!PyArg_ParseTuple(args, "OOOs",
&py_integration_weights,
&py_omegas,
&py_tetrahedra_omegas,
&function)) {
return NULL;
}
omegas = (double*)PyArray_DATA(py_omegas);
iw = (double*)PyArray_DATA(py_integration_weights);
num_omegas = (long)PyArray_DIMS(py_omegas)[0];
tetrahedra_omegas = (double(*)[4])PyArray_DATA(py_tetrahedra_omegas);
#pragma omp parallel for
for (i = 0; i < num_omegas; i++) {
iw[i] = phpy_get_integration_weight(omegas[i],
tetrahedra_omegas,
function[0]);
}
Py_RETURN_NONE;
}
static PyObject * py_get_tetrahedra_frequenies(PyObject *self, PyObject *args)
{
PyArrayObject* py_freq_tetras;
PyArrayObject* py_grid_points;
PyArrayObject* py_mesh;
PyArrayObject* py_grid_address;
PyArrayObject* py_gp_ir_index;
PyArrayObject* py_relative_grid_address;
PyArrayObject* py_frequencies;
double* freq_tetras;
long* grid_points;
long* mesh;
long (*grid_address)[3];
long* gp_ir_index;
long (*relative_grid_address)[3];
double* frequencies;
long num_gp_in, num_band;
if (!PyArg_ParseTuple(args, "OOOOOOO",
&py_freq_tetras,
&py_grid_points,
&py_mesh,
&py_grid_address,
&py_gp_ir_index,
&py_relative_grid_address,
&py_frequencies)) {
return NULL;
}
freq_tetras = (double*)PyArray_DATA(py_freq_tetras);
grid_points = (long*)PyArray_DATA(py_grid_points);
num_gp_in = PyArray_DIMS(py_grid_points)[0];
mesh = (long*)PyArray_DATA(py_mesh);
grid_address = (long(*)[3])PyArray_DATA(py_grid_address);
gp_ir_index = (long*)PyArray_DATA(py_gp_ir_index);
relative_grid_address = (long(*)[3])PyArray_DATA(py_relative_grid_address);
frequencies = (double*)PyArray_DATA(py_frequencies);
num_band = PyArray_DIMS(py_frequencies)[1];
phpy_get_tetrahedra_frequenies(freq_tetras,
mesh,
grid_points,
grid_address,
relative_grid_address,
gp_ir_index,
frequencies,
num_band,
num_gp_in);
Py_RETURN_NONE;
}
static PyObject * py_tetrahedron_method_dos(PyObject *self, PyObject *args)
{
PyArrayObject* py_dos;
PyArrayObject* py_mesh;
PyArrayObject* py_freq_points;
PyArrayObject* py_frequencies;
PyArrayObject* py_coef;
PyArrayObject* py_grid_address;
PyArrayObject* py_grid_mapping_table;
PyArrayObject* py_relative_grid_address;
double *dos;
long* mesh;
double* freq_points;
double* frequencies;
double* coef;
long (*grid_address)[3];
long num_gp, num_ir_gp, num_band, num_freq_points, num_coef;
long *grid_mapping_table;
long (*relative_grid_address)[4][3];
if (!PyArg_ParseTuple(args, "OOOOOOOO",
&py_dos,
&py_mesh,
&py_freq_points,
&py_frequencies,
&py_coef,
&py_grid_address,
&py_grid_mapping_table,
&py_relative_grid_address)) {
return NULL;
}
/* dos[num_ir_gp][num_band][num_freq_points][num_coef] */
dos = (double*)PyArray_DATA(py_dos);
mesh = (long*)PyArray_DATA(py_mesh);
freq_points = (double*)PyArray_DATA(py_freq_points);
num_freq_points = (long)PyArray_DIMS(py_freq_points)[0];
frequencies = (double*)PyArray_DATA(py_frequencies);
num_ir_gp = (long)PyArray_DIMS(py_frequencies)[0];
num_band = (long)PyArray_DIMS(py_frequencies)[1];
coef = (double*)PyArray_DATA(py_coef);
num_coef = (long)PyArray_DIMS(py_coef)[1];
grid_address = (long(*)[3])PyArray_DATA(py_grid_address);
num_gp = (long)PyArray_DIMS(py_grid_address)[0];
grid_mapping_table = (long*)PyArray_DATA(py_grid_mapping_table);
relative_grid_address = (long(*)[4][3])PyArray_DATA(py_relative_grid_address);
phpy_tetrahedron_method_dos(dos,
mesh,
grid_address,
relative_grid_address,
grid_mapping_table,
freq_points,
frequencies,
coef,
num_freq_points,
num_ir_gp,
num_band,
num_coef,
num_gp);
Py_RETURN_NONE;
}
|
omp_lock.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
omp_lock_t mylock;
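/* Demo: four parallel sections contend for one lock. Each section holds the
 * lock for about 1 s while it prints, so the four messages appear roughly one
 * per second, in whatever thread order the runtime picks (~4 s total). */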
int main()
{
omp_init_lock(&mylock);
#pragma omp parallel num_threads(4)
{
#pragma omp sections
{
#pragma omp section
{
omp_set_lock(&mylock);
sleep(1);
printf("[%d] 1. Hello world\n", omp_get_thread_num());
omp_unset_lock(&mylock);
}
#pragma omp section
{
omp_set_lock(&mylock);
sleep(1);
printf("[%d] 2. Hello world\n", omp_get_thread_num());
omp_unset_lock(&mylock);
}
#pragma omp section
{
omp_set_lock(&mylock);
sleep(1);
printf("[%d] 3. Hello world\n", omp_get_thread_num());
omp_unset_lock(&mylock);
}
#pragma omp section
{
omp_set_lock(&mylock);
sleep(1);
printf("[%d] 4. Hello world\n", omp_get_thread_num());
omp_unset_lock(&mylock);
}
} /* sections */
} /* parallel */
omp_destroy_lock(&mylock);
return 0;
}
|
vect-simd-clone-15.c | /* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#ifndef N
#define N 1024
#endif
int array[N];
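/* linear(val(b):-3) promises the vectorizer that, inside the SIMD clone, the
 * argument b steps by -3 across consecutive lanes -- consistent with the call
 * foo (i >> 1, -i * 3) in bar () below. */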
#pragma omp declare simd linear(val(b):-3), notinbranch
__attribute__((noinline)) int
foo (int a, int b)
{
return a + b;
}
__attribute__((noinline, noclone)) void
bar ()
{
int i;
#pragma omp simd
for (i = 0; i < N; ++i)
array[i] = foo (i >> 1, -i * 3);
}
int
main ()
{
int i;
check_vect ();
bar ();
for (i = 0; i < N; i++)
if (array[i] != ((i >> 1) + (-3 * i)))
abort ();
return 0;
}
|
update_ops_pauli_multi.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* perform multi_qubit_Pauli_gate with XZ mask.
*
* This function assumes bit_flip_mask is not 0, i.e., at least one bit is flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask.
 * This function updates the quantum state with the Pauli operation.
 * bit_flip_mask, phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be computed before calling this function.
 * See get_masks_from_*_list for the above four arguments.
*/
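/* Sketch of the convention inferred from the code below: for a Pauli string
 * P ~ (phase) * Z(phase_flip_mask) * X(bit_flip_mask), each index pair
 * basis_0 and basis_1 = basis_0 ^ bit_flip_mask swaps amplitudes, each
 * multiplied by PHASE_M90ROT[(global_phase_90rot_count + 2 * parity) % 4],
 * where parity is the population parity of (basis & phase_flip_mask) and
 * PHASE_M90ROT[k] is assumed to hold (-i)^k. */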
void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim);
void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, CTYPE* state, ITYPE dim) {
// pivot mask
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
// loop variables
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
#ifdef _OPENMP
UINT threshold = 14;
if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
// gather index
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
// determine sign
UINT sign_0 = count_population(basis_0 & phase_flip_mask) % 2;
UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values
state[basis_0] = cval_1 * PHASE_M90ROT[(global_phase_90rot_count + sign_0 * 2) % 4];
state[basis_1] = cval_0 * PHASE_M90ROT[(global_phase_90rot_count + sign_1 * 2) % 4];
}
#ifdef _OPENMP
omp_set_num_threads(omp_get_max_threads());
#endif
}
void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim) {
// pivot mask
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
// loop variables
const ITYPE loop_dim = dim / 2;
ITYPE state_index;
// coefs
const double cosval = cos(angle / 2);
const double sinval = sin(angle / 2);
#ifdef _OPENMP
UINT threshold = 14;
if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
// gather index
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
// determine parity
int bit_parity_0 = count_population(basis_0 & phase_flip_mask) % 2;
int bit_parity_1 = count_population(basis_1 & phase_flip_mask) % 2;
// fetch values
CTYPE cval_0 = state[basis_0];
CTYPE cval_1 = state[basis_1];
// set values
state[basis_0] = cosval * cval_0 + 1.i * sinval * cval_1 * PHASE_M90ROT[(global_phase_90rot_count + bit_parity_0 * 2) % 4];
state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 * PHASE_M90ROT[(global_phase_90rot_count + bit_parity_1 * 2) % 4];
}
#ifdef _OPENMP
omp_set_num_threads(omp_get_max_threads());
#endif
}
void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim) {
// loop variables
const ITYPE loop_dim = dim;
ITYPE state_index;
#ifdef _OPENMP
UINT threshold = 14;
if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
// determine parity
int bit_parity = count_population(state_index & phase_flip_mask) % 2;
// set values
if (bit_parity % 2 == 1) {
state[state_index] *= -1;
}
}
#ifdef _OPENMP
omp_set_num_threads(omp_get_max_threads());
#endif
}
void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim) {
// loop variables
const ITYPE loop_dim = dim;
ITYPE state_index;
// coefs
const double cosval = cos(angle / 2);
const double sinval = sin(angle / 2);
#ifdef _OPENMP
UINT threshold = 14;
if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#pragma omp parallel for
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
// determine sign
int bit_parity = count_population(state_index & phase_flip_mask) % 2;
int sign = 1 - 2 * bit_parity;
// set value
state[state_index] *= cosval + (CTYPE)sign * 1.i * sinval;
}
#ifdef _OPENMP
omp_set_num_threads(omp_get_max_threads());
#endif
}
void multi_qubit_Pauli_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, CTYPE* state, ITYPE dim) {
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if (bit_flip_mask == 0) {
multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state, dim);
}
else {
multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
}
void multi_qubit_Pauli_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, CTYPE* state, ITYPE dim) {
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if (bit_flip_mask == 0) {
multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state, dim);
}
else {
multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
}
void multi_qubit_Pauli_rotation_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, CTYPE* state, ITYPE dim) {
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if (bit_flip_mask == 0) {
multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim);
}
else {
multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim);
}
}
void multi_qubit_Pauli_rotation_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, double angle, CTYPE* state, ITYPE dim) {
// create pauli mask and call function
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
if (bit_flip_mask == 0) {
multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim);
}
else {
multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim);
}
}
|
syrk.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* syrk.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "syrk.h"
/* Array initialization. */
static
void init_array(int n, int m,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D(C, N, N, n, n),
DATA_TYPE POLYBENCH_2D(A, N, M, n, m))
{
int i, j;
*alpha = 1.5;
*beta = 1.2;
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
A[i][j] = (DATA_TYPE) ((i * j + 1) % n) / n;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
C[i][j] = (DATA_TYPE) ((i * j + 2) % m) / m;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(C, N, N, n, n))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("C");
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
{
if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]);
}
POLYBENCH_DUMP_END("C");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_syrk(int n, int m,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(C, N, N, n, n),
DATA_TYPE POLYBENCH_2D(A, N, M, n, m))
{
int i, j, k;
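/* BLAS-style SYRK update on the lower triangle (j <= i):
   C := alpha * A * A^T + beta * C.
   The outer i-loop is parallelized; each thread owns whole rows of C. */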
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(n, beta, m, alpha, A)
for (i = 0; i < _PB_N; i++)
{
for (j = 0; j <= i; j++)
C[i][j] *= beta;
for (k = 0; k < _PB_M; k++)
{
for (j = 0; j <= i; j++)
C[i][j] += alpha * A[i][k] * A[j][k];
}
}
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, N, N, n, n);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, M, n, m);
/* Initialize array(s). */
init_array (n, m, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_syrk (n, m, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(A);
return 0;
}
|
LAGraph_pagerank3c.c | //------------------------------------------------------------------------------
// LAGraph_pagerank3c: pagerank using a real semiring
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_pagerank3c: GAP-style PageRank, with import/export
// See also LAGraph_pagerank3a, for the same computation without import/export.
// This algorithm follows the specification given in the GAP Benchmark Suite:
// https://arxiv.org/abs/1508.03619 which assumes that both A and A' are
// already available, as are the row and column degrees.
// The GAP Benchmark algorithm assumes the graph has no nodes with no out-going
// edges (otherwise, a divide-by-zero occurs). In terms of the adjacency
// matrix, it assumes there are no rows in A that have no entries.
// For fastest results, the input matrix should be stored in GxB_BY_COL format.
// TODO: or use AT by row, since the GAP assumes both A and A' are available.
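// Sketch of the update implemented below (notation mine, not LAGraph's):
//   pr_new = (1 - damping)/n + damping * A' * (pr ./ d_out)
// iterated until sum_i |pr_new[i] - pr[i]| <= tol, or itermax is reached.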
#include "LAGraph.h"
#define LAGRAPH_FREE_WORK \
{ \
LAGRAPH_FREE (I) ; \
LAGRAPH_FREE (pr) ; \
LAGRAPH_FREE (prior) ; \
GrB_free (&v) ; \
}
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE_WORK ; \
GrB_free (result) ; \
}
GrB_Info LAGraph_pagerank3c // PageRank definition
(
GrB_Vector *result, // output: array of LAGraph_PageRank structs
GrB_Matrix A, // binary input graph, not modified
const float *restrict d_out, // out degree of each node (GrB_FP32, size n)
float damping, // damping factor (typically 0.85)
int itermax, // maximum number of iterations
int *iters // output: number of iterations taken
)
{
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Index n, ncols ;
GrB_Vector v = NULL ;
GrB_Index *I = NULL ;
float *restrict pr = NULL ;
float *prior = NULL ;
(*result) = NULL ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nrows (&n, A) ;
if (ncols != n)
{
LAGRAPH_ERROR ("matrix must be square", GrB_DIMENSION_MISMATCH) ;
}
// Teleport value
const float teleport = (1 - damping) / n ;
const float tol = 1e-4 ;
float rdiff = 1 ; // first iteration is always done
GrB_Type type = GrB_FP32 ;
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (n, nthreads) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// initializing pr and I
pr = LAGraph_malloc (n, sizeof (float)) ;
I = LAGraph_malloc (n, sizeof (GrB_Index)) ;
prior = LAGraph_malloc (n, sizeof (float)) ;
if (pr == NULL || I == NULL || prior == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < n ; k++)
{
I [k] = k ;
pr [k] = 1.0/n ;
}
//--------------------------------------------------------------------------
// pagerank iterations
//--------------------------------------------------------------------------
for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++)
{
// printf ("\n============================ pagerank 3C iter: %d\n", (*iters)) ;
// Importance calculation
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t i = 0 ; i < n; i++)
{
prior [i] = pr [i] ;
pr [i] = damping * pr [i] / d_out [i] ;
}
// import pr and I into v
LAGr_Vector_import (&v, GrB_FP32, n, n, &I, (void **) (&pr), NULL) ;
// Calculate total PR of all inbound vertices: v = A' * v
LAGr_mxv (v, NULL, NULL, GxB_PLUS_SECOND_FP32, A, v, LAGraph_desc_tooo);
GrB_Index nvals ;
LAGr_Vector_nvals (&nvals, v) ;
if (nvals != n)
{
LAGRAPH_ERROR ("Matrix must not have empty rows or columns!",
GrB_PANIC) ;
}
// export v to pr and I
LAGr_Vector_export (&v, &type, &n, &nvals, &I, (void **) (&pr), NULL) ;
// add teleport and check for convergence
rdiff = 0 ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:rdiff)
for (int64_t i = 0 ; i < n; i++)
{
pr [i] += teleport ;
rdiff += fabsf (prior [i] - pr [i]) ;
}
}
// import result (pr and I) into final result
LAGr_Vector_import (result, GrB_FP32, n, n, &I, (void **) (&pr), NULL) ;
LAGRAPH_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
convolution_sgemm.h | // BUG1989 is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __AVX__
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
const float* kernel = _kernel;
// kernel memory packed 8 x 8
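// Layout sketch: for each group of 8 output channels, the weights are
// interleaved so position q stores k0[q]..k7[q] contiguously; the AVX
// microkernel below then reads one packed weight per channel via
// _mm256_broadcast_ss.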
kernel_tm.create(8 * kernel_size, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
const float* k0 = kernel + (p + 0) * inch * kernel_size;
const float* k1 = kernel + (p + 1) * inch * kernel_size;
const float* k2 = kernel + (p + 2) * inch * kernel_size;
const float* k3 = kernel + (p + 3) * inch * kernel_size;
const float* k4 = kernel + (p + 4) * inch * kernel_size;
const float* k5 = kernel + (p + 5) * inch * kernel_size;
const float* k6 = kernel + (p + 6) * inch * kernel_size;
const float* k7 = kernel + (p + 7) * inch * kernel_size;
float* ktmp = kernel_tm.channel(p / 8);
for (int q = 0; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp[4] = k4[0];
ktmp[5] = k5[0];
ktmp[6] = k6[0];
ktmp[7] = k7[0];
ktmp += 8;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
k4 += 1;
k5 += 1;
k6 += 1;
k7 += 1;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
const float* k0 = kernel + (p + 0) * inch * kernel_size;
const float* k1 = kernel + (p + 1) * inch * kernel_size;
const float* k2 = kernel + (p + 2) * inch * kernel_size;
const float* k3 = kernel + (p + 3) * inch * kernel_size;
float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp += 4;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
const float* k0 = kernel + (p + 0) * inch * kernel_size;
float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp++;
k0++;
}
}
}
static void conv_im2col_sgemm_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
// im2col
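// im2col flattens each kernel_h x kernel_w receptive field into one column,
// turning the convolution into a plain matrix multiply: bottom_im2col holds
// inch * kernel_h * kernel_w rows of outw * outh output positions each.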
Mat bottom_im2col(outw * outh, kernel_h * kernel_w * inch, elemsize, opt.workspace_allocator);
{
const int stride = kernel_h * kernel_w * outw * outh;
float* ret = (float*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const float* input = bottom_blob.channel(p);
int retID = stride * p;
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// bottom_im2col memory packed 8 x 8
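// Columns are repacked in groups of 8 so each GEMM step can fetch 8 output
// positions with a single 256-bit load (_mm256_loadu_ps below).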
Mat bottom_tm(8 * kernel_size, inch, out_size / 8 + out_size % 8, elemsize, opt.workspace_allocator);
{
int nn_size = out_size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 8;
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i / 8);
for (int q = 0; q < inch * kernel_size; q++)
{
#if __AVX__
_mm256_storeu_ps(tmpptr, _mm256_loadu_ps(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
#endif // __AVX__
tmpptr += 8;
img0 += out_size;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < out_size; i++)
{
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i / 8 + i % 8);
for (int q = 0; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
// sgemm(int M, int N, int L, float* A, float* B, float* C)
{
//int M = outch; // outch
int N = outw * outh; // outsize or out stride
int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 8;
float* output0 = top_blob.channel(i);
float* output1 = top_blob.channel(i + 1);
float* output2 = top_blob.channel(i + 2);
float* output3 = top_blob.channel(i + 3);
float* output4 = top_blob.channel(i + 4);
float* output5 = top_blob.channel(i + 5);
float* output6 = top_blob.channel(i + 6);
float* output7 = top_blob.channel(i + 7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + i : zeros;
int j = 0;
for (; j + 7 < N; j = j + 8)
{
const float* vb = bottom_tm.channel(j / 8);
const float* va = kernel_tm.channel(i / 8);
#if __AVX__
__m256 _sum0 = _mm256_broadcast_ss(biasptr);
__m256 _sum1 = _mm256_broadcast_ss(biasptr + 1);
__m256 _sum2 = _mm256_broadcast_ss(biasptr + 2);
__m256 _sum3 = _mm256_broadcast_ss(biasptr + 3);
__m256 _sum4 = _mm256_broadcast_ss(biasptr + 4);
__m256 _sum5 = _mm256_broadcast_ss(biasptr + 5);
__m256 _sum6 = _mm256_broadcast_ss(biasptr + 6);
__m256 _sum7 = _mm256_broadcast_ss(biasptr + 7);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_comp_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_comp_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_comp_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_comp_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_comp_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_comp_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70
va += 8;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_comp_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_comp_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_comp_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_comp_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_comp_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
_sum5 = _mm256_comp_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
_sum6 = _mm256_comp_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
_sum7 = _mm256_comp_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71
va += 8;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_comp_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_comp_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_comp_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_comp_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_comp_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
_sum5 = _mm256_comp_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
_sum6 = _mm256_comp_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
_sum7 = _mm256_comp_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72
va += 8;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_comp_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_comp_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_comp_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_comp_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_comp_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
_sum5 = _mm256_comp_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
_sum6 = _mm256_comp_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
_sum7 = _mm256_comp_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73
va += 8;
vb += 32;
}
for (; k < L; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _va4 = _mm256_broadcast_ss(va + 4);
__m256 _va5 = _mm256_broadcast_ss(va + 5);
__m256 _va6 = _mm256_broadcast_ss(va + 6);
__m256 _va7 = _mm256_broadcast_ss(va + 7);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_comp_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_comp_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_comp_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_sum4 = _mm256_comp_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_comp_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_comp_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_comp_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70
va += 8;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
_mm256_storeu_ps(output4, _sum4);
_mm256_storeu_ps(output5, _sum5);
_mm256_storeu_ps(output6, _sum6);
_mm256_storeu_ps(output7, _sum7);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
float sum4[8] = {0};
float sum5[8] = {0};
float sum6[8] = {0};
float sum7[8] = {0};
int k = 0;
for (; k + 7 < L; k = k + 8)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
va += 8;
sum0[n] += va[0] * vb[n + 8];
sum1[n] += va[1] * vb[n + 8];
sum2[n] += va[2] * vb[n + 8];
sum3[n] += va[3] * vb[n + 8];
sum4[n] += va[4] * vb[n + 8];
sum5[n] += va[5] * vb[n + 8];
sum6[n] += va[6] * vb[n + 8];
sum7[n] += va[7] * vb[n + 8];
va += 8;
sum0[n] += va[0] * vb[n + 16];
sum1[n] += va[1] * vb[n + 16];
sum2[n] += va[2] * vb[n + 16];
sum3[n] += va[3] * vb[n + 16];
sum4[n] += va[4] * vb[n + 16];
sum5[n] += va[5] * vb[n + 16];
sum6[n] += va[6] * vb[n + 16];
sum7[n] += va[7] * vb[n + 16];
va += 8;
sum0[n] += va[0] * vb[n + 24];
sum1[n] += va[1] * vb[n + 24];
sum2[n] += va[2] * vb[n + 24];
sum3[n] += va[3] * vb[n + 24];
sum4[n] += va[4] * vb[n + 24];
sum5[n] += va[5] * vb[n + 24];
sum6[n] += va[6] * vb[n + 24];
sum7[n] += va[7] * vb[n + 24];
va += 8;
sum0[n] += va[0] * vb[n + 32];
sum1[n] += va[1] * vb[n + 32];
sum2[n] += va[2] * vb[n + 32];
sum3[n] += va[3] * vb[n + 32];
sum4[n] += va[4] * vb[n + 32];
sum5[n] += va[5] * vb[n + 32];
sum6[n] += va[6] * vb[n + 32];
sum7[n] += va[7] * vb[n + 32];
va += 8;
sum0[n] += va[0] * vb[n + 40];
sum1[n] += va[1] * vb[n + 40];
sum2[n] += va[2] * vb[n + 40];
sum3[n] += va[3] * vb[n + 40];
sum4[n] += va[4] * vb[n + 40];
sum5[n] += va[5] * vb[n + 40];
sum6[n] += va[6] * vb[n + 40];
sum7[n] += va[7] * vb[n + 40];
va += 8;
sum0[n] += va[0] * vb[n + 48];
sum1[n] += va[1] * vb[n + 48];
sum2[n] += va[2] * vb[n + 48];
sum3[n] += va[3] * vb[n + 48];
sum4[n] += va[4] * vb[n + 48];
sum5[n] += va[5] * vb[n + 48];
sum6[n] += va[6] * vb[n + 48];
sum7[n] += va[7] * vb[n + 48];
va += 8;
sum0[n] += va[0] * vb[n + 56];
sum1[n] += va[1] * vb[n + 56];
sum2[n] += va[2] * vb[n + 56];
sum3[n] += va[3] * vb[n + 56];
sum4[n] += va[4] * vb[n + 56];
sum5[n] += va[5] * vb[n + 56];
sum6[n] += va[6] * vb[n + 56];
sum7[n] += va[7] * vb[n + 56];
va -= 56;
}
va += 64;
vb += 64;
}
for (; k < L; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n] + biasptr[0];
output1[n] = sum1[n] + biasptr[1];
output2[n] = sum2[n] + biasptr[2];
output3[n] = sum3[n] + biasptr[3];
output4[n] = sum4[n] + biasptr[4];
output5[n] = sum5[n] + biasptr[5];
output6[n] = sum6[n] + biasptr[6];
output7[n] = sum7[n] + biasptr[7];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
for (; j < N; j++)
{
const float* vb = bottom_tm.channel(j / 8 + j % 8);
const float* va = kernel_tm.channel(i / 8);
#if __AVX__
__m256 _sum0_7 = _mm256_loadu_ps(biasptr);
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _vb1 = _mm256_broadcast_ss(vb + 1);
__m256 _vb2 = _mm256_broadcast_ss(vb + 2);
__m256 _vb3 = _mm256_broadcast_ss(vb + 3);
__m256 _va0 = _mm256_loadu_ps(va);
__m256 _va1 = _mm256_loadu_ps(va + 8);
__m256 _va2 = _mm256_loadu_ps(va + 16);
__m256 _va3 = _mm256_loadu_ps(va + 24);
_sum0 = _mm256_comp_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00
_sum1 = _mm256_comp_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10
_sum2 = _mm256_comp_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20
_sum3 = _mm256_comp_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30
va += 32;
vb += 4;
}
_sum0 = _mm256_add_ps(_sum0, _sum1);
_sum2 = _mm256_add_ps(_sum2, _sum3);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum2);
for (; k < L; k++)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _va = _mm256_loadu_ps(va);
_sum0_7 = _mm256_comp_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00
va += 8;
vb += 1;
}
float output_sum0_7[8] = {0.f};
_mm256_storeu_ps(output_sum0_7, _sum0_7);
output0[0] = output_sum0_7[0];
output1[0] = output_sum0_7[1];
output2[0] = output_sum0_7[2];
output3[0] = output_sum0_7[3];
output4[0] = output_sum0_7[4];
output5[0] = output_sum0_7[5];
output6[0] = output_sum0_7[6];
output7[0] = output_sum0_7[7];
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
float sum4 = biasptr[4];
float sum5 = biasptr[5];
float sum6 = biasptr[6];
float sum7 = biasptr[7];
for (int k = 0; k < L; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
sum4 += va[4] * vb[0];
sum5 += va[5] * vb[0];
sum6 += va[6] * vb[0];
sum7 += va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
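// Second pass: output channels left over from the 8-wide blocks are processed
// four at a time with a narrower micro-kernel.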
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
float* output0 = top_blob.channel(i);
float* output1 = top_blob.channel(i + 1);
float* output2 = top_blob.channel(i + 2);
float* output3 = top_blob.channel(i + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + i : zeros;
int j = 0;
for (; j + 7 < N; j = j + 8)
{
const float* vb = bottom_tm.channel(j / 8);
const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4);
#if __AVX__
__m256 _sum0 = _mm256_broadcast_ss(biasptr);
__m256 _sum1 = _mm256_broadcast_ss(biasptr + 1);
__m256 _sum2 = _mm256_broadcast_ss(biasptr + 2);
__m256 _sum3 = _mm256_broadcast_ss(biasptr + 3);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_comp_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_comp_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_comp_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_comp_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_comp_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_comp_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_comp_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
va += 4;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_comp_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_comp_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_comp_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_comp_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
va += 4;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_comp_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_comp_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_comp_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_comp_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
va += 4;
vb += 32;
}
for (; k < L; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_comp_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_comp_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_comp_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
int k = 0;
for (; k + 7 < L; k = k + 8)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
va += 4;
sum0[n] += va[0] * vb[n + 8];
sum1[n] += va[1] * vb[n + 8];
sum2[n] += va[2] * vb[n + 8];
sum3[n] += va[3] * vb[n + 8];
va += 4;
sum0[n] += va[0] * vb[n + 16];
sum1[n] += va[1] * vb[n + 16];
sum2[n] += va[2] * vb[n + 16];
sum3[n] += va[3] * vb[n + 16];
va += 4;
sum0[n] += va[0] * vb[n + 24];
sum1[n] += va[1] * vb[n + 24];
sum2[n] += va[2] * vb[n + 24];
sum3[n] += va[3] * vb[n + 24];
va += 4;
sum0[n] += va[0] * vb[n + 32];
sum1[n] += va[1] * vb[n + 32];
sum2[n] += va[2] * vb[n + 32];
sum3[n] += va[3] * vb[n + 32];
va += 4;
sum0[n] += va[0] * vb[n + 40];
sum1[n] += va[1] * vb[n + 40];
sum2[n] += va[2] * vb[n + 40];
sum3[n] += va[3] * vb[n + 40];
va += 4;
sum0[n] += va[0] * vb[n + 48];
sum1[n] += va[1] * vb[n + 48];
sum2[n] += va[2] * vb[n + 48];
sum3[n] += va[3] * vb[n + 48];
va += 4;
sum0[n] += va[0] * vb[n + 56];
sum1[n] += va[1] * vb[n + 56];
sum2[n] += va[2] * vb[n + 56];
sum3[n] += va[3] * vb[n + 56];
va -= 28;
}
va += 32;
vb += 64;
}
for (; k < L; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n] + biasptr[0];
output1[n] = sum1[n] + biasptr[1];
output2[n] = sum2[n] + biasptr[2];
output3[n] = sum3[n] + biasptr[3];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
for (; j < N; j++)
{
const float* vb = bottom_tm.channel(j / 8 + j % 8);
const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4);
#if __AVX__
__m128 _sum0_3 = _mm_loadu_ps(biasptr);
__m128 _sum0 = _mm_set1_ps(0.0);
__m128 _sum1 = _mm_set1_ps(0.0);
__m128 _sum2 = _mm_set1_ps(0.0);
__m128 _sum3 = _mm_set1_ps(0.0);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va + 4);
__m128 _va2 = _mm_loadu_ps(va + 8);
__m128 _va3 = _mm_loadu_ps(va + 12);
_sum0 = _mm_comp_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00
_sum1 = _mm_comp_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10
_sum2 = _mm_comp_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20
_sum3 = _mm_comp_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k < L; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_comp_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
float output_sum0_3[4] = {0.f};
_mm_storeu_ps(output_sum0_3, _sum0_3);
output0[0] = output_sum0_3[0];
output1[0] = output_sum0_3[1];
output2[0] = output_sum0_3[2];
output3[0] = output_sum0_3[3];
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int k = 0; k < L; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
}
}
remain_outch_start += nn_outch << 2;
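// Final pass: any output channels remaining after the 8- and 4-wide blocks
// are computed one at a time.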
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_outch_start; i < outch; i++)
{
float* output = top_blob.channel(i);
const float bias0 = bias ? bias[i] : 0.f;
int j = 0;
for (; j + 7 < N; j = j + 8)
{
const float* vb = bottom_tm.channel(j / 8);
const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4 + i % 4);
#if __AVX__
__m256 _sum0 = _mm256_broadcast_ss(&bias0);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum0 = _mm256_comp_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
_sum0 = _mm256_comp_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
_sum0 = _mm256_comp_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03
va += 4;
vb += 32;
}
for (; k < L; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_comp_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
va += 1;
vb += 8;
}
_mm256_storeu_ps(output, _sum0);
#else
float sum[8] = {0};
int k = 0;
for (; k + 7 < L; k = k + 8)
{
for (int n = 0; n < 8; n++)
{
sum[n] += va[0] * vb[n];
sum[n] += va[1] * vb[n + 8];
sum[n] += va[2] * vb[n + 16];
sum[n] += va[3] * vb[n + 24];
sum[n] += va[4] * vb[n + 32];
sum[n] += va[5] * vb[n + 40];
sum[n] += va[6] * vb[n + 48];
sum[n] += va[7] * vb[n + 56];
}
va += 8;
vb += 64;
}
for (; k < L; k++)
{
for (int n = 0; n < 8; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output[n] = sum[n] + bias0;
}
#endif // __AVX__
output += 8;
}
for (; j < N; j++)
{
const float* vb = bottom_tm.channel(j / 8 + j % 8);
const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4 + i % 4);
int k = 0;
#if __AVX__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k + 3 < L; k += 4)
{
__m128 _p0 = _mm_loadu_ps(vb);
vb += 4;
__m128 _k0 = _mm_loadu_ps(va);
va += 4;
_sum0 = _mm_comp_fmadd_ps(_p0, _k0, _sum0);
}
float output_sum0[4] = {0.f};
_mm_storeu_ps(output_sum0, _sum0);
float sum0 = bias0 + output_sum0[0] + output_sum0[1] + output_sum0[2] + output_sum0[3];
#else
float sum0 = bias0;
#endif // __AVX__
for (; k < L; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
}
#else
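// Fallback build (taken when the vectorized variant above is compiled out):
// the same im2col + packed-GEMM scheme, but tiled 4-wide to fit 128-bit SSE
// registers, with plain scalar loops when even SSE is unavailable.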
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
const float* kernel = _kernel;
// kernel memory packed 4 x 4
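// Four consecutive filters are interleaved element by element, so for one
// kernel position the micro-kernel reads k00..k30 as ktmp[0..3].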
kernel_tm.create(4 * kernel_size, inch, outch / 4 + outch % 4);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
const float* k0 = kernel + (p + 0) * inch * kernel_size;
const float* k1 = kernel + (p + 1) * inch * kernel_size;
const float* k2 = kernel + (p + 2) * inch * kernel_size;
const float* k3 = kernel + (p + 3) * inch * kernel_size;
float* ktmp = kernel_tm.channel(p / 4);
for (int q = 0; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp += 4;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
}
}
for (int p = remain_outch_start; p < outch; p++)
{
const float* k0 = kernel + (p + 0) * inch * kernel_size;
float* ktmp = kernel_tm.channel(p / 4 + p % 4);
for (int q = 0; q < inch * kernel_size; q++)
{
ktmp[0] = k0[0];
ktmp++;
k0++;
}
}
}
static void conv_im2col_sgemm_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias,
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
// im2col
Mat bottom_im2col(outw * outh, kernel_h * kernel_w * inch, elemsize, opt.workspace_allocator);
{
const int stride = kernel_h * kernel_w * outw * outh;
float* ret = (float*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const float* input = bottom_blob.channel(p);
int retID = stride * p;
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// bottom_im2col memory packed 4 x 4
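// Same column tiling as the AVX path, but 4-wide: full tiles of 4 columns plus
// one channel per leftover column.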
Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, elemsize, opt.workspace_allocator);
{
int nn_size = out_size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 4;
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i / 4);
for (int q = 0; q < inch * kernel_size; q++)
{
#if __SSE__
_mm_storeu_ps(tmpptr, _mm_loadu_ps(img0));
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
#endif // __SSE__
tmpptr += 4;
img0 += out_size;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < out_size; i++)
{
const float* img0 = bottom_im2col.channel(0);
img0 += i;
float* tmpptr = bottom_tm.channel(i / 4 + i % 4);
for (int q = 0; q < inch * kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
// sgemm(int M, int N, int L, float* A, float* B, float* C)
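// 4x4 micro-kernel: 4 output channels by 4 output pixels per iteration,
// accumulated in __m128 registers when SSE is available, scalar otherwise.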
{
//int M = outch; // outch
int N = outw * outh; // output size: number of output pixels per channel
int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 4;
float* output0 = top_blob.channel(i);
float* output1 = top_blob.channel(i + 1);
float* output2 = top_blob.channel(i + 2);
float* output3 = top_blob.channel(i + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + i : zeros;
int j = 0;
for (; j + 3 < N; j = j + 4)
{
const float* vb = bottom_tm.channel(j / 4);
const float* va = kernel_tm.channel(i / 4);
#if __SSE__
__m128 _sum0 = _mm_set1_ps(biasptr[0]);
__m128 _sum1 = _mm_set1_ps(biasptr[1]);
__m128 _sum2 = _mm_set1_ps(biasptr[2]);
__m128 _sum3 = _mm_set1_ps(biasptr[3]);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
// k0
__m128 _vb = _mm_loadu_ps(vb);
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30
// k1
_vb = _mm_loadu_ps(vb + 4);
_va0 = _mm_set1_ps(va[4]);
_va1 = _mm_set1_ps(va[5]);
_va2 = _mm_set1_ps(va[6]);
_va3 = _mm_set1_ps(va[7]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a10-a13) * k01
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a10-a13) * k11
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a10-a13) * k21
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a10-a13) * k31
// k2
_vb = _mm_loadu_ps(vb + 8);
_va0 = _mm_set1_ps(va[8]);
_va1 = _mm_set1_ps(va[9]);
_va2 = _mm_set1_ps(va[10]);
_va3 = _mm_set1_ps(va[11]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a20-a23) * k02
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a20-a23) * k12
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a20-a23) * k22
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a20-a23) * k32
// k3
_vb = _mm_loadu_ps(vb + 12);
_va0 = _mm_set1_ps(va[12]);
_va1 = _mm_set1_ps(va[13]);
_va2 = _mm_set1_ps(va[14]);
_va3 = _mm_set1_ps(va[15]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a30-a33) * k03
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a30-a33) * k13
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a30-a33) * k23
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a30-a33) * k33
va += 16;
vb += 16;
}
for (; k < L; k++)
{
// k0
__m128 _vb = _mm_loadu_ps(vb);
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30
va += 4;
vb += 4;
}
_mm_storeu_ps(output0, _sum0);
_mm_storeu_ps(output1, _sum1);
_mm_storeu_ps(output2, _sum2);
_mm_storeu_ps(output3, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
int k = 0;
for (; k + 7 < L; k = k + 8)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
va += 4;
sum0[n] += va[0] * vb[n + 4];
sum1[n] += va[1] * vb[n + 4];
sum2[n] += va[2] * vb[n + 4];
sum3[n] += va[3] * vb[n + 4];
va += 4;
sum0[n] += va[0] * vb[n + 8];
sum1[n] += va[1] * vb[n + 8];
sum2[n] += va[2] * vb[n + 8];
sum3[n] += va[3] * vb[n + 8];
va += 4;
sum0[n] += va[0] * vb[n + 12];
sum1[n] += va[1] * vb[n + 12];
sum2[n] += va[2] * vb[n + 12];
sum3[n] += va[3] * vb[n + 12];
va += 4;
sum0[n] += va[0] * vb[n + 16];
sum1[n] += va[1] * vb[n + 16];
sum2[n] += va[2] * vb[n + 16];
sum3[n] += va[3] * vb[n + 16];
va += 4;
sum0[n] += va[0] * vb[n + 20];
sum1[n] += va[1] * vb[n + 20];
sum2[n] += va[2] * vb[n + 20];
sum3[n] += va[3] * vb[n + 20];
va += 4;
sum0[n] += va[0] * vb[n + 24];
sum1[n] += va[1] * vb[n + 24];
sum2[n] += va[2] * vb[n + 24];
sum3[n] += va[3] * vb[n + 24];
va += 4;
sum0[n] += va[0] * vb[n + 28];
sum1[n] += va[1] * vb[n + 28];
sum2[n] += va[2] * vb[n + 28];
sum3[n] += va[3] * vb[n + 28];
va -= 28;
}
va += 32;
vb += 32;
}
for (; k < L; k++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output0[n] = sum0[n] + biasptr[0];
output1[n] = sum1[n] + biasptr[1];
output2[n] = sum2[n] + biasptr[2];
output3[n] = sum3[n] + biasptr[3];
}
#endif // __SSE__
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
for (; j < N; j++)
{
const float* vb = bottom_tm.channel(j / 4 + j % 4);
const float* va = kernel_tm.channel(i / 4);
#if __SSE__
__m128 _sum0_3 = _mm_loadu_ps(biasptr);
__m128 _sum0 = _mm_set1_ps(0.0);
__m128 _sum1 = _mm_set1_ps(0.0);
__m128 _sum2 = _mm_set1_ps(0.0);
__m128 _sum3 = _mm_set1_ps(0.0);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va + 4);
__m128 _va2 = _mm_loadu_ps(va + 8);
__m128 _va3 = _mm_loadu_ps(va + 12);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); // sum0 += (k00-k30) * a00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1)); // sum1 += (k01-k31) * a10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2)); // sum2 += (k02-k32) * a20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3)); // sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k < L; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0)); // sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
float sum0_3_tmp[4];
_mm_storeu_ps(sum0_3_tmp, _sum0_3);
output0[0] = sum0_3_tmp[0];
output1[0] = sum0_3_tmp[1];
output2[0] = sum0_3_tmp[2];
output3[0] = sum0_3_tmp[3];
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int k = 0; k < L; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __SSE__
output0++;
output1++;
output2++;
output3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_outch_start; i < outch; i++)
{
float* output = top_blob.channel(i);
const float bias0 = bias ? bias[i] : 0.f;
int j = 0;
for (; j + 3 < N; j = j + 4)
{
const float* vb = bottom_tm.channel(j / 4);
const float* va = kernel_tm.channel(i / 4 + i % 4);
#if __SSE__
__m128 _sum0 = _mm_set1_ps(bias0);
int k = 0;
for (; k + 3 < L; k = k + 4)
{
// k0
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
__m128 _vb0 = _mm_loadu_ps(vb);
__m128 _vb1 = _mm_loadu_ps(vb + 4);
__m128 _vb2 = _mm_loadu_ps(vb + 8);
__m128 _vb3 = _mm_loadu_ps(vb + 12);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1)); // sum0 += (a10-a13) * k01
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2)); // sum0 += (a20-a23) * k02
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3)); // sum0 += (a30-a33) * k03
va += 4;
vb += 16;
}
for (; k < L; k++)
{
// k0
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _vb0 = _mm_loadu_ps(vb);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
va += 1;
vb += 4;
}
_mm_storeu_ps(output, _sum0);
#else
float sum[4] = {0};
int k = 0;
for (; k + 3 < L; k = k + 4)
{
for (int n = 0; n < 4; n++)
{
sum[n] += va[0] * vb[n];
sum[n] += va[1] * vb[n + 4];
sum[n] += va[2] * vb[n + 8];
sum[n] += va[3] * vb[n + 12];
//sum[n] += va[4] * vb[n+16];
//sum[n] += va[5] * vb[n+20];
//sum[n] += va[6] * vb[n+24];
//sum[n] += va[7] * vb[n+28];
}
va += 4;
vb += 16;
}
for (; k < L; k++)
{
for (int n = 0; n < 4; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output[n] = sum[n] + bias0;
}
#endif // __SSE__
output += 4;
}
for (; j < N; j++)
{
const float* vb = bottom_tm.channel(j / 4 + j % 4);
const float* va = kernel_tm.channel(i / 4 + i % 4);
int k = 0;
#if __SSE__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k + 3 < L; k += 4)
{
__m128 _p0 = _mm_loadu_ps(vb);
__m128 _k0 = _mm_loadu_ps(va);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
va += 4;
vb += 4;
}
float sum0_tmp[4];
_mm_storeu_ps(sum0_tmp, _sum0);
float sum0 = bias0 + sum0_tmp[0] + sum0_tmp[1] + sum0_tmp[2] + sum0_tmp[3];
#else
float sum0 = bias0;
#endif // __SSE__
for (; k < L; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
}
#endif
|
tree_mesh_builder.h | /**
* @file tree_mesh_builder.h
*
* @author Simon Stupinsky <xstupi0000@stud.fit.vutbr.cz>
*
* @brief Parallel Marching Cubes implementation using OpenMP tasks + octree early elimination
*
* @date 15.12.2019 10:00
**/
#ifndef TREE_MESH_BUILDER_H
#define TREE_MESH_BUILDER_H
//#include <map>
#include "base_mesh_builder.h"
class TreeMeshBuilder : public BaseMeshBuilder {
public:
TreeMeshBuilder(unsigned gridEdgeSize);
protected:
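// Recursive octree descent over the grid (see @brief above): cubes that the
// scalar field shows cannot intersect the iso-surface are eliminated early,
// the rest are subdivided and marched.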
unsigned octree(const ParametricScalarField &field, unsigned mGridSize, const Vec3_t<float> &pos);
unsigned marchCubes(const ParametricScalarField &field);
float evaluateFieldAt(const Vec3_t<float> &pos, const ParametricScalarField &field);
void emitTriangle(const Triangle_t &triangle);
const Triangle_t *getTrianglesArray() const { return mTriangles.data(); }
std::vector<Triangle_t> mTriangles; ///< Temporary array of triangles
// const Triangle_t *getTrianglesArray() const {
// unsigned size = 0;
// for (const auto &myPair : mTriangles) { size += myPair.second.size(); }
// auto* res = new Triangle_t[size];
// unsigned offset = 0;
//// #pragma omp parallel default(shared)
// for (int i = 0; i < mTriangles.size(); i++) {
// copyVectorToVector(mTriangles.at(i), res + offset);
// offset += mTriangles.at(i).size();
// }
// return res;
// };
//
// void static copyVectorToVector(const std::vector<Triangle_t>& src, Triangle_t* dst) {
// unsigned size = src.size();
// for (unsigned i = 0; i < size; i++) {
// dst[i] = src.at(i);
// }
// }
//
// std::map<int, std::vector<Triangle_t> > mTriangles;
};
#endif // TREE_MESH_BUILDER_H
|
ops.h | #pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
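// The no_op_exec_special* macros stamp out boilerplate for ops that need no
// specialized execution path: requiresSpecial(Accumulation) is set to false
// and the execSpecial bodies are left empty.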
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(T *x, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#include <helpers/sharedmem.h>
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer,T *result, Nd4jLong *resultShapeBuffer,T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#endif
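// Alpha and lambda constants of the SELU (scaled exponential linear unit)
// activation.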
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxT : float,double,float16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minT : float,double,float16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(sumT : float,double,float16 : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0.0f)
#endif
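// Illustrative only: a minimal sketch of how the reductions declared above
// could be used (x and n are assumed here, standing for any float array and
// its length):
//
//   float mx = -MAX_FLOAT;
//   #pragma omp parallel for reduction(maxT : mx)
//   for (Nd4jLong i = 0; i < n; i++)
//       mx = nd4j::math::nd4j_max(mx, x[i]);
//
// Each thread starts from the declared initializer (-MAX_FLOAT) and the
// per-thread partials are merged with nd4j_max, yielding the global maximum.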
namespace functions {
namespace indexreduce {
template<typename T>
struct IndexValue {
T value;
Nd4jLong index;
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template<typename T>
class Add {
public:
op_def static T op(T d1, T d2) {
return d1 + d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 + d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 + params[0];
}
op_def static T startingValue() {
return static_cast<T>(0.f);
}
};
template<typename T>
class Subtract {
public:
op_def static T op(T d1, T d2) {
return d1 - d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 - d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 - params[0];
}
};
template<typename T>
class SquaredSubtract {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f));
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f));
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_pow<T>(d1 - params[0], static_cast<T>(2.f));
}
};
template<typename T>
class ReverseSubtract {
public:
op_def static T op(T d1, T d2) {
return d2 - d1;
}
op_def static T op(T d1, T d2, T *params) {
return d2 - d1;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0] - d1;
}
};
template<typename T>
class LogPoisonLossFull {
public:
op_def static T op(T z, T c) {
return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)));
}
op_def static T op(T z, T c, T *params) {
return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)));
}
op_def static T op(T z) {
return (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z));
}
// op for MetaOps
op_def static T op(T z, T *params) {
return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)));
}
};
template<typename T>
class LogPoisonLoss {
public:
op_def static T op(T z, T c) {
return (nd4j::math::nd4j_exp<T>(c) - z * c);
}
op_def static T op(T z, T c, T *params) {
return (nd4j::math::nd4j_exp<T>(c) - z * c);
}
op_def static T op(T z) {
return (z);
}
// op for MetaOps
op_def static T op(T z, T *params) {
return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0]);
}
};
template<typename T>
class Multiply {
public:
op_def static T op(T d1, T d2) {
return d1 * d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 * d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 * params[0];
}
op_def static T startingValue() {
return static_cast<T>(1.f);
}
};
template<typename T>
class Divide {
public:
op_def static T op(T d1, T d2) {
return d1 / d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 / d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 / params[0];
}
op_def static T startingValue() {
return static_cast<T>(1.f);
}
};
template<typename T>
class SafeDivide {
public:
op_def static T op(T d1, T d2) {
if(d2 == static_cast<T>(0.f))
return static_cast<T>(0.f);
return d1 / d2;
}
op_def static T op(T d1, T d2, T *params) {
if(d2 == static_cast<T>(0.f))
return static_cast<T>(0.f);
return d1 / d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
if(params[0] == static_cast<T>(0.f))
return static_cast<T>(0.f);
return d1 / params[0];
}
};
template<typename T>
class FloorDiv {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_floor<T>(d1 / d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_floor<T>(d1 / d2);
}
op_def static T op(T d1) {
return nd4j::math::nd4j_floor<T>(d1);
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_floor<T>(d1 / params[0]);
}
};
template<typename T>
class TruncateDiv {
public:
op_def static T op(T d1, T d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<T>(i1 / i2);
}
op_def static T op(T d1, T d2, T *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<T>(i1 / i2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<T>(i1 / i2);
}
};
template<typename T>
class Remainder {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_remainder(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_remainder(d1, d2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_remainder(d1, params[0]);
}
};
template<typename T>
class FMod {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_fmod(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_fmod(d1, d2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_fmod(d1, params[0]);
}
};
template<typename T>
class FloorMod {
public:
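// Floored modulo: unlike fmod, the result takes the sign of the divisor d2
// (Python-style %); the sign comparison below selects between the raw fmod
// result and the shifted value.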
op_def static T op(T d1, T d2) {
T m = nd4j::math::nd4j_fmod(d1, d2);
return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2);
}
op_def static T op(T d1, T d2, T *params) {
T m = nd4j::math::nd4j_fmod(d1, d2);
return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
T m = nd4j::math::nd4j_fmod(d1, params[0]);
return (d1 < static_cast<T>(0.0f)) == (params[0] < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + params[0], params[0]);
}
};
template<typename T>
class ReverseDivide {
public:
op_def static T op(T d1, T d2) {
return d2 / d1;
}
op_def static T op(T d1, T d2, T *params) {
return d2 / d1;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0] / d1;
}
};
template<typename T>
class Copy {
public:
op_def static T op(T d1, T d2) {
return d2;
}
op_def static T op(T d1, T d2, T *params) {
return d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0];
}
};
template<typename T>
class Copy2 {
public:
op_def static T op(T d1, T d2) {
return d2;
}
op_def static T op(T d1, T d2, T *params) {
return d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0];
}
};
template<typename T>
class Axpy {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T alpha = params[0];
return alpha * d1 + d2;
}
op_def static T op(T d1) {
return d1;
}
};
template<typename T>
class And {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T comp = params[0];
return d1 != comp && d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return static_cast<T>(119.0f);
}
};
template<typename T>
class Or {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T comp = params[0];
return d1 != comp || d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return static_cast<T>(119.0f);
}
};
template<typename T>
class Xor {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T comp = params[0];
return ((d1 == comp && d2 != comp)||(d1 != comp && d2 == comp)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return d1;
}
};
template<typename T>
class Not {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T comp = params[0];
return d1 == comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
template<typename T>
class SetValOrLess {
public:
op_def static T op(T d1, T d2, T *params) {
if (d2 < d1) {
return d1;
}
return d2;
}
};
template<typename T>
class Mod {
public:
/*
// just an optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static T op(T d1, T d2) {
return (int)d1 % (int)d2;
}
op_def static T op(T d1, T d2, T *params) {
return (int)d1 % (int)d2;
}
// op for MetaOp
op_def static T op(T d1, T *params) {
return (int)d1 % (int)params[0];
}
};
template<typename T>
class ReverseMod {
public:
op_def static T op(T d1, T d2) {
return (int)d2 % (int)d1;
}
op_def static T op(T d1, T d2, T *params) {
return (int)d2 % (int)d1;
}
// op for MetaOp
op_def static T op(T d1, T *params) {
return (int)params[0] % (int)d1;
}
};
/**
* Whether 2 elements in an array
* are epsilon equal
*/
template<typename T>
class Epsilon {
public:
op_def static T op(T d1, T d2, T *params) {
T diff = d1 - d2;
T absDiff = nd4j::math::nd4j_abs<T>(diff);
if (absDiff <= static_cast<T>(MIN))
return static_cast<T>(1.0f);
return static_cast<T>(0.0f);
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class EqualTo {
public:
op_def static T op(T d1, T d2) {
return d1 == d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 == d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class NotEqualTo {
public:
op_def static T op(T d1, T d2) {
return d1 != d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 != d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class GreaterThanOrEqual {
public:
op_def static T op(T d1, T d2) {
return d1 >= d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 >= d2;
}
// FIXME: this signature clashes with MetaOp stuff
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class GreaterThan {
public:
op_def static T op(T d1, T d2) {
return d1 > d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 > d2;
}
// FIXME: this signature clashes with MetaOp stuff
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class LessThan {
public:
op_def static T op(T d1, T d2) {
return d1 < d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 < d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class LessThanOrEqual {
public:
op_def static T op(T d1, T d2) {
return d1 <= d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 <= d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class Abs {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_abs<T>(d1);
}
};
template<typename T>
class Ceiling {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_ceil<T>(d1);
}
};
template<typename T>
class Cosine {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_cos<T>(d1);
}
};
template<typename T>
class Exp {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_exp<T>(d1);
}
};
template<typename T>
class HardTanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return ((d1 >= static_cast<T>(-1.0f) && d1 <= static_cast<T>(1.0f)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f));
}
};
template<typename T>
class HardTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 < static_cast<T>(-1.0f))
return static_cast<T>(-1.0f);
else if (d1 > static_cast<T>(1.0f))
return static_cast<T>(1.0f);
else
return d1;
}
};
template<typename T>
class Floor {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_floor<T>(d1);
}
};
template<typename T>
class Log {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log<T>(d1);
}
};
template<typename T>
class Log1p {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log<T>(static_cast<T>(1.f) + d1);
}
};
template<typename T>
class LogX {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log<T>(d1) / nd4j::math::nd4j_log<T>(params[0]) ;
}
};
template<typename T>
class StabilizeFP16 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 <= static_cast<T>(0.f)) return static_cast<T>(0.001f);
else return d1;
}
};
template<typename T>
class SpecialDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * (static_cast<T>(1.0f) - d1);
}
};
template<typename T>
class Neg {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return -d1;
}
};
template<typename T>
class Erf {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_erf<T>(d1);
}
};
template<typename T>
class Erfc {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_erfc<T>(d1);
}
};
template<typename T>
class Reciprocal {
public:
no_op_exec_special
no_op_exec_special_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static T op(T d1, T *params) {
return (static_cast<T>(1.0f)/d1);
}
};
template<typename T>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.f));
}
op_def static T op(T d1) {
return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f));
}
};
template<typename T>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_re<T>(d1, params[0]);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_re<T>(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_re<T>(d1, d2);
}
op_def static T op(T d1) {
return static_cast<T>(0.0f);
}
};
template<typename T>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T d2 = params[0];
T threshold = params[1];
return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1, T d2, T *params) {
T threshold = params[0];
return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return static_cast<T>(0.0f);
}
};
template<typename T>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T d2 = params[0];
T thresholdRelative = params[1];
T thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f);
}
op_def static T op(T d1, T d2, T *params) {
T thresholdRelative = params[0];
T thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return static_cast<T>(0.0f);
}
};
template<typename T>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_pow<T>(d1, params[0]);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_pow<T>(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_pow<T>(d1, d2);
}
op_def static T op(T d1) {
return d1;
}
};
template<typename T>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return params[0] * nd4j::math::nd4j_pow<T>(d1, params[0] - static_cast<T>(1.f));
}
op_def static T op(T d1, T d2) {
return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f));
}
op_def static T op(T d1, T d2, T *params) {
return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f));
}
op_def static T op(T d1) {
return d1;
}
};
template<typename T>
class Round {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_round<T>(d1);
}
};
template<typename T>
class IsNan {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class Expm1 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_exp(d1) - static_cast<T>(1.0f);
}
};
template<typename T>
class IsInf {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isinf<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class IsInfOrNan {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isfin<T>(d1) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
}
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class IsFinite {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isfin<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class ClipByValue {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 > params[1])
return params[1];
else if (d1 < params[0])
return params[0];
else return d1;
}
};
template<typename T>
class Swish {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * nd4j::math::nd4j_sigmoid<T>(d1);
}
};
template<typename T>
class SwishDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T ex = nd4j::math::nd4j_pow<T>(static_cast<T>(M_E), d1);
return (ex * (d1 + ex + static_cast<T>(1.f))) / nd4j::math::nd4j_pow<T>((ex + static_cast<T>(1.f)) , static_cast<T>(2.0f));
}
};
template<typename T>
class LogSigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log(nd4j::math::nd4j_sigmoid<T>(d1));
}
};
template<typename T>
class LogSigmoidDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T ex = nd4j::math::nd4j_pow<T>(static_cast<T>(M_E), d1);
return static_cast<T>(1.f) / (ex + static_cast<T>(1.f));
}
};
template<typename T>
class Sigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sigmoid<T>(d1);
}
};
template<typename T>
class SigmoidDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sigmoidderivative<T>(d1);
}
};
template<typename T>
class HardSigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_min<T>(static_cast<T>(1.0f), nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), (static_cast<T>(0.2f)) * d1 + static_cast<T>(0.5f)));
}
};
template<typename T>
class HardSigmoidDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 < static_cast<T>(-2.5f) || d1 > static_cast<T>(2.5f) ? static_cast<T>(0.0f) : static_cast<T>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
template<typename T>
class SetRange {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T min = params[0];
T max = params[1];
if (d1 >= min && d1 <= max)
return d1;
if (min == static_cast<T>(0.0f) && max == static_cast<T>(1.0f)) {
auto val = static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_exp<T>(-d1));
return (nd4j::math::nd4j_floor<T>(val * (max - min)) + min);
}
auto ret = (nd4j::math::nd4j_floor<T>(d1 * (max - min)) + min);
return ret;
}
};
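// Usage sketch (added illustration, not part of the original API): the range is
// passed through the extra-params buffer as params = {min, max}; in-range
// inputs pass through unchanged.
//
//   float range[2] = {0.0f, 10.0f};
//   float y = SetRange<float>::op(3.0f, range); // 3.0f lies in [0, 10] -> returned as-is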
template<typename T>
class Sin {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sin<T>(d1);
}
};
template<typename T>
class Square {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * d1;
}
};
template<typename T>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sqrt<T>(d1);
}
};
template<typename T>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f) / nd4j::math::nd4j_sqrt<T>(d1);
}
};
template<typename T>
class Rint {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_rint<T>(d1);
}
};
template<typename T>
class SoftPlus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::softplus<T>(d1);
}
};
template<typename T>
class Sign {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return (d1 > static_cast<T>(0.0f)) - (d1 < static_cast<T>(0.0f));
}
};
template<typename T>
class TimesOneMinus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * (static_cast<T>(1.0f) - d1);
}
};
template<typename T>
class RationalTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1;
auto tanh = nd4j::math::nd4j_sgn<T>(dis) * (static_cast<T>(1.0f) - (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.0f)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.0f)) )));
return static_cast<T>(1.7159f) * tanh;
}
};
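// Added note: the expression above evaluates the rational approximation
//   f(x) = 1.7159 * sgn(x') * (1 - 1 / (1 + |x'| + x'^2 + 1.41645 * x'^4)),  x' = (2/3) * x,
// a cheap surrogate for 1.7159 * tanh(2x/3) that avoids a transcendental call.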
template<typename T>
class RationalTanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1;
auto a = static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.f));
auto tDeriv = (static_cast<T>(1.0f) + nd4j::math::nd4j_sign<T>(dis) * (static_cast<T>(2.0f) * dis + static_cast<T>(4.0f) * static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(3.f)))) / (a * a);
return static_cast<T>(1.7159f) * (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * tDeriv;
}
};
template<typename T>
class Tanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_tanh<T>(d1);
}
};
template<typename T>
class RectifiedTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), nd4j::math::nd4j_tanh<T>(d1));
}
};
template<typename T>
class RectifiedTanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 > static_cast<T>(0.0f) ? nd4j::math::nd4j_tanhderivative<T>(d1) : static_cast<T>(0.0f);
}
};
template<typename T>
class ATanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_atanh<T>(d1);
}
};
template<typename T>
class TanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_tanhderivative<T>(d1);
}
};
template<typename T>
class Cube {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * d1 * d1;
}
};
template<typename T>
class CubeDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(3.0f) * d1 * d1;
}
};
template<typename T>
class ACos {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_acos<T>(d1);
}
};
template<typename T>
class ASinh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_asinh<T>(d1);
}
};
template<typename T>
class ASinhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(nd4j::math::nd4j_pow(d1, static_cast<T>(2.f)) + static_cast<T>(1.f)));
}
};
template<typename T>
class ACosh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_acosh<T>(d1);
}
};
template<typename T>
class ACoshDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(d1 - static_cast<T>(1.f)) * nd4j::math::nd4j_sqrt(d1 + static_cast<T>(1.f)));
}
};
template<typename T>
class Ones {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f);
}
};
template<typename T>
class SoftSign {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_softsign<T>(d1);
}
};
template<typename T>
class SoftSignDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_softsignderivative<T>(d1);
}
};
template<typename T>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static T op(T d1, T *extraParams) {
T compare = extraParams[0];
T eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
if (mode == 0) // equals
return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? 1.0 : 0.0;
else if (mode == 1) // not equals
return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? 1.0 : 0.0;
else if (mode == 2) // less_than
return d1 < compare? 1.0 : 0.0;
else if (mode ==3) // greater_than
return d1 > compare? 1.0 : 0.0;
else if (mode == 4) // less_or_equals_than
return d1 <= compare? 1.0 : 0.0;
else if (mode == 5) // greater_or_equals_than
return d1 >= compare? 1.0 : 0.0;
else if (mode == 6) // abs_less_than
return nd4j::math::nd4j_abs<T>(d1) < compare? 1.0 : 0.0;
else if (mode == 7) // abs_greater_than
return nd4j::math::nd4j_abs<T>(d1) > compare? 1.0 : 0.0;
else if (mode == 8) // is inf
return nd4j::math::nd4j_isinf(d1) ? 1.0 : 0.0;
else if (mode == 9) // is nan
return nd4j::math::nd4j_isnan(d1) ? 1.0 : 0.0;
else if (mode == 10)
return (d1 == compare) ? 1.0 : 0.0;
else if (mode == 11)
return (d1 != compare) ? 1.0 : 0.0;
else if (mode == 12) // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<T>(d1) >= compare? 1.0 : 0.0;
else if (mode == 13) // abs_less_or_equals_than
return nd4j::math::nd4j_abs<T>(d1) <= compare? 1.0 : 0.0;
else
printf("Undefined match condition: [%i]\n", mode);
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
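// Usage sketch (hypothetical helper, not part of the original API): a host-side
// reduction that counts the elements satisfying the condition encoded in
// extraParams = {compare, eps, mode}, driving the class contract above.
template<typename T>
static inline T countMatchesExample(const T *data, Nd4jLong n, T *extraParams) {
    T acc = MatchCondition<T>::startingValue(data);
    for (Nd4jLong i = 0; i < n; i++)
        acc = MatchCondition<T>::update(acc, MatchCondition<T>::op(data[i], extraParams), extraParams);
    return MatchCondition<T>::postProcess(acc, n, extraParams);
}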
template<typename T>
class ELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_elu<T>(d1);
}
};
template<typename T>
class ELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_eluderivative<T>(d1);
}
};
template<typename T>
class RELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 < params[0] ? params[0] : d1;
}
};
template<typename T>
class RELU6 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T relu = d1 < params[0] ? params[0] : d1;
return relu < static_cast<T>(6.f) ? relu : static_cast<T>(6.f);
}
};
template<typename T>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_leakyrelu<T>(d1, params[0]);
}
};
template<typename T>
class SELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) * d1 : static_cast<T>(SELU_LAMBDA) * (static_cast<T>(SELU_ALPHA) * nd4j::math::nd4j_exp<T>(d1) - static_cast<T>(SELU_ALPHA));
}
};
template<typename T>
class SELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) : static_cast<T>(SELU_ALPHA) * static_cast<T>(SELU_LAMBDA) * nd4j::math::nd4j_exp<T>(d1);
}
};
template<typename T>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 >= static_cast<T>(0.0f))
return static_cast<T>(1.0f);
else
return params[0];
}
};
template<typename T>
class ASin {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_asin<T>(d1);
}
};
template<typename T>
class Sinh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sinh<T>(d1);
}
};
template<typename T>
class SinhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_cosh<T>(d1);
}
};
template<typename T>
class Cosh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_cosh<T>(d1);
}
};
template<typename T>
class Tan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_tan<T>(d1);
}
};
template<typename T>
class TanDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f) / nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_cos<T>(d1), static_cast<T>(2.0f));
}
};
template<typename T>
class ATan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_atan(d1);
}
};
template<typename T>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_atan2<T>(d2, d1);
}
};
template<typename T>
class Identity {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class Stabilize {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T k = params[0];
if (d1 * k > static_cast<T>(- MIN_CUTFOFF))
return static_cast<T>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<T>(MIN_CUTFOFF))
return static_cast<T>(MIN_CUTFOFF) / k;
return d1;
}
};
template<typename T>
class Step {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return (d1 > params[0] ? static_cast<T>(1.0f) : static_cast<T>(0.0f));
}
};
template<typename T>
class OneMinus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f) - d1;
}
};
template<typename T>
class Sum {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)) * nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)));
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return -reduction;
}
};
template<typename T>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)));
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1 * nd4j::math::nd4j_log<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class ASum {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
template<typename T>
class CountNonZero {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1 == static_cast<T>(0.0f) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class CountZero {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1 == static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class Prod {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(1.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f) ;
}
};
template<typename T>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(1.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
template<typename T>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction / static_cast<T>(n);
}
};
template<typename T>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction) / static_cast<T>(n);
}
};
template<typename T>
class Max {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(old, opOutput);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(opOutput, old);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_max<T>(d1, d2);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_max<T>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class AMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old));
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2));
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_abs<T>(d1) > nd4j::math::nd4j_abs<T>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
template<typename T>
class AMin {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old));
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_min(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2));
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_abs<T>(d1) < nd4j::math::nd4j_abs<T>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
template<typename T>
class Min {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(old, opOutput);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(opOutput, old);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_min(d1, d2);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_min(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
op_def static T op(T d1, T *extraParams) {
return d1 * d1;
}
};
template<typename T>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1 * d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
template<typename T>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
T v = nd4j::math::nd4j_abs(d1);
return v * v;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
};
template<typename T>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_pow(nd4j::math::nd4j_abs(d1), extraParams[0]);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_pow(reduction, static_cast<T>(1.0f) / extraParams[0]);
}
};
template<typename T>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old),
nd4j::math::nd4j_abs<T>(opOutput));
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
template<typename T>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T op(T d1, T *extraParams) {
T mean = extraParams[0];
T ret = d1 - mean;
return ret * ret;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return reduction / static_cast<T>(n - 1);
}
};
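// Added note: this is the second pass of a two-pass variance. The caller must
// place the precomputed mean in extraParams[0]; op() then accumulates squared
// deviations and postProcess() applies Bessel's correction by dividing by n - 1.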
/**
* Standard deviation of a buffer
*/
template<typename T>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T op(T d1, T *extraParams) {
T mean = extraParams[0];
T ret = d1 - mean;
return ret * ret;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
T ret = Variance<T>::postProcess(reduction, n, extraParams);
T sqrtRet = nd4j::math::nd4j_sqrt<T>(ret);
return sqrtRet;
}
};
template<typename T>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1]));
}
op_def static T op(T d1, T d2, T *extraParams) {
extraParams[0] += d1 * d1;
extraParams[1] += d2 * d2;
return (d1 * d2);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<T>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<T>(d2 * d2));
return (d1 * d2);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
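// Added note: op() accumulates the two squared norms in extraParams while
// returning the running dot product, so a single pass yields
//   cos(x, y) = <x, y> / (||x|| * ||y||)
// once postProcess() divides by the square roots of the accumulated norms.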
template<typename T>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
// num / denom
return (static_cast<T>(1.0f)) - (extraParams[0] / extraParams[1]);
}
op_def static T num(T d1, T d2) {
return nd4j::math::nd4j_min<T>(d1, d2);
}
op_def static T denom(T d1, T d2) {
return nd4j::math::nd4j_max<T>(d1, d2);
}
op_def static T op(T d1, T d2, T *extraParams) {
extraParams[0] += num(d1, d2);
extraParams[1] += denom(d1, d2);
return static_cast<T>(0.0f);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<T>(0.0f);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
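// Added note: this computes the weighted Jaccard distance
//   1 - sum_i min(x_i, y_i) / sum_i max(x_i, y_i),
// with the numerator and denominator carried in extraParams[0] and extraParams[1].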
template<typename T>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return static_cast<T>(reduction / n);
}
op_def static T op(T d1, T d2, T *extraParams) {
return (d1 == d2) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
template<typename T>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return (static_cast<T>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1])));
}
op_def static T op(T d1, T d2, T *extraParams) {
extraParams[0] += nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1);
extraParams[1] += nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2);
return (d1 * d2);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2));
return (d1 * d2);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
*/
template<typename T>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return reduction;
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
return d1 * d2;
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return opOutput + old;
}
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};
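// Usage sketch (added illustration): under the pairwise-reduction contract
// shared by the classes in this section, Dot sums op(x[i], y[i]) = x[i] * y[i]
// through update(), so the result is the plain inner product sum_i x_i * y_i
// with no extra parameters required.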
/**
* Op to check equality within arrays
*/
template<typename T>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return reduction;
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
T eps = extraParamsRef[2];
T diff = nd4j::math::nd4j_abs<T>(d1 - d2);
// works well except in the range of very large numbers
if (diff <= eps)
return static_cast<T>(0.f);
// Knuth approach
// works well except in the range of very small numbers
if (diff <= nd4j::math::nd4j_max(nd4j::math::nd4j_abs(d1), nd4j::math::nd4j_abs(d2)) * eps)
return static_cast<T>(0.f);
return static_cast<T>(1.f);
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return opOutput + old;
}
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};
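// Added note: op() combines an absolute test (|d1 - d2| <= eps) with Knuth's
// relative test (|d1 - d2| <= max(|d1|, |d2|) * eps), returning 0 for "equal"
// and 1 otherwise, so the reduction counts the number of mismatched entries.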
template<typename T>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
T ret = d1 - d2;
return ret * ret;
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return opOutput + old;
}
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};
template<typename T>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return reduction;
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
return nd4j::math::nd4j_abs<T>(d1 - d2);
}
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
template<typename T>
class IndexAbsoluteMax {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return nd4j::math::nd4j_abs<T>(val);
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
old.value = nd4j::math::nd4j_abs<T>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (nd4j::math::nd4j_abs<T>(f1.value) > nd4j::math::nd4j_abs<T>(f2.value))
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return MIN_FLOAT;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
template<typename T>
class FirstIndex {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<T>(0.0f))
return old;
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return - nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
};
template<typename T>
class LastIndex {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);
if (res == static_cast<T>(0.0f))
return old;
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return -nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
};
template<typename T>
class IndexMax {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return -nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
template<typename T>
class IndexAbsoluteMin {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(
functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
old.value = nd4j::math::nd4j_abs<T>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (nd4j::math::nd4j_abs<T>(f1.value) < nd4j::math::nd4j_abs<T>(f2.value))
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
template<typename T>
class IndexMin {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(
functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
template<typename T>
class SummaryStatsVariance {
public:
static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
if (biasCorrected) {
T ret = val.varianceBiasCorrected();
if (ret < static_cast<T>(0.0f))
return val.variance();
return ret;
}
return val.variance();
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,T *extraParams) {
return d1;
}
};
template<typename T>
class SummaryStatsStandardDeviation {
public:
static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
if (biasCorrected) {
T ret = val.varianceBiasCorrected();
if (ret < static_cast<T>(0.0f))
return nd4j::math::nd4j_sqrt(val.variance());
else
return nd4j::math::nd4j_sqrt(ret);
}
return nd4j::math::nd4j_sqrt(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1,T *extraParams) {
return d1;
}
};
template<typename T>
class DropOut {
public:
no_op_exec_special
no_op_exec_special_cuda
inline _CUDA_D static T op(T d1, T *params) {
T prob = params[0];
#ifdef __CUDACC__
T length = params[1];
T tid = gridDim.x * blockDim.x + threadIdx.x;
T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid)));
#else
T rnd = static_cast<T>(rand()) / static_cast<T>(RAND_MAX); // cast before dividing: integer division would always yield 0
#endif
return rnd >= prob ? static_cast<T>(0.0f) : d1;
}
};
template<typename T>
class DropOutInverted {
public:
no_op_exec_special
no_op_exec_special_cuda
#ifdef __CUDACC__
__device__
#endif
inline static T op(T d1, T *params) {
T prob = params[0];
#ifdef __CUDACC__
T length = params[1];
T tid = gridDim.x * blockDim.x + threadIdx.x;
T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid)));
#else
T rnd = static_cast<T>(rand()) / static_cast<T>(RAND_MAX); // cast before dividing: integer division would always yield 0
#endif
return rnd >= prob ? static_cast<T>(0.0f) : d1 / prob;
}
};
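// Added note: unlike DropOut above, the inverted variant divides surviving
// activations by the keep threshold prob, preserving the expected activation
// magnitude so that no rescaling is required at inference time.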
template<typename T>
class ReplaceNans {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T replacement = params[0];
return nd4j::math::nd4j_isnan(d1) ? replacement : d1 ;
}
};
// this op is used for conditional pairwise transforms only
template<typename T>
class CompareAndReplace{
public:
no_op_exec_special
no_op_exec_special_cuda
// op definition for PairWise Transform
op_def static T op(T d1, T d2, T *params) {
T compare = params[0];
T eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<T>(d1 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals eps
if (nd4j::math::nd4j_abs<T>(d1 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than eps
if (d1 < compare)
return d2;
else
return d1;
else if (mode ==3) // greater_than
if (d1 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<T>(d1) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<T>(d1) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d1))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d1))
return d2;
else
return d1;
else if (mode == 10)
if (d1 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d1 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<T>(d1) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<T>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
template<typename T>
class CompareAndSet {
public:
no_op_exec_special
no_op_exec_special_cuda
// op definition for Transform
op_def static T op(T d1, T *params) {
T compare = params[0];
T set = params[1];
T eps = params[2];
// mode == 0: set when d1 equals compare (within eps); mode == 1: set when it does not; further modes follow below
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<T>(d1 - compare) <= eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<T>(d1 - compare) > eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
else if (mode == 2) // less_than
if (d1 < compare)
return set;
else
return d1;
else if (mode ==3) // greater_than
if (d1 > compare)
return set;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return set;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return set;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<T>(d1) < compare)
return set;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<T>(d1) > compare)
return set;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d1))
return set;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d1))
return set;
else
return d1;
else if (mode == 10)
if (d1 == compare)
return set;
else
return d1;
else if (mode == 11)
if (d1 != compare)
return set;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<T>(d1) >= compare)
return set;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<T>(d1) <= compare)
return set;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
// op definition for PairWise Transform
op_def static T op(T d1, T d2, T *params) {
T compare = params[0];
T eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<T>(d2 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<T>(d2 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than
if (d2 < compare)
return d2;
else
return d1;
else if (mode ==3) // greater_than
if (d2 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d2 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d2 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<T>(d2) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<T>(d2) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d2))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d2))
return d2;
else
return d1;
else if (mode == 10)
if (d2 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d2 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<T>(d2) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<T>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
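// Illustrative usage sketch (not part of the original op set): how a caller
// might pack the extra-params buffer consumed by CompareAndSet::op above,
// assuming the layout params = {compare, set, eps, mode} that op() reads.
//
// float params[4] = {0.0f, 1.0f, 1e-6f, 0.0f /* mode 0: equals */};
// float r = CompareAndSet<float>::op(0.0f, params); // d1 matches compare within eps, so r == 1.0f (set)
// float s = CompareAndSet<float>::op(5.0f, params); // no match, so s == 5.0f (d1 unchanged)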
}
#endif
|
GB_unop__identity_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_fp32)
// op(A') function: GB (_unop_tran__identity_fp64_fp32)
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp64_fp32)
(
double *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
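// Hypothetical driver sketch (not part of SuiteSparse itself): applying the
// kernel above to a small dense array. Ab == NULL selects the non-bitmap
// path, and each float entry is typecast to double.
//
// float Ax [3] = {1.5f, 2.5f, 3.5f} ;
// double Cx [3] ;
// GrB_Info info = GB (_unop_apply__identity_fp64_fp32) (Cx, Ax, NULL, 3, 1) ;
// // info == GrB_SUCCESS (unless GB_DISABLE), and Cx holds {1.5, 2.5, 3.5}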
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp64_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bd.c | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h> // access
#include <math.h>
#include <assert.h>
#include "timer.h"
#include "bd.h"
#include <omp.h>
#define NTHREADS 24
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#define my_EPS 0.000000001
void get_indices(int index, int *i, int *j, int *k, int b){
int ib, ib2;
ib = index%(b); ib2 = index%(b*b);
*k = ib;
*i = (index-ib2)/(b*b);
*j = (ib2-*k)/b;
return;
}
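// Example: with b = 4, index 27 decodes to i = 1, j = 2, k = 3, and indeed
// i*b*b + j*b + k = 16 + 8 + 3 = 27, so the mapping round-trips.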
struct box
{
int head;
};
// it is possible to use smaller boxes and more complex neighbor patterns
#define NUM_BOX_NEIGHBORS 14
int box_neighbors[NUM_BOX_NEIGHBORS][3] =
{
{-1,-1,-1},
{-1,-1, 0},
{-1,-1,+1},
{-1, 0,-1},
{-1, 0, 0},
{-1, 0,+1},
{-1,+1,-1},
{-1,+1, 0},
{-1,+1,+1},
{ 0,-1,-1},
{ 0,-1, 0},
{ 0,-1,+1},
{ 0, 0,-1},
{ 0, 0, 0} // will calculate within the box interactions
};
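// Only 14 of the 27 offsets are listed (a "half shell" plus the box itself),
// so each unordered pair of boxes is visited exactly once; the force loop
// below then updates both particles of a pair, avoiding double counting.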
int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const)
{
// Initialisations required for the interaction function. NOTE: these could be passed to bd() as inputs instead of being hard-coded here.
double krepul = 100, a = 1, a_sq, phi = 0.2, f;
a_sq = a*a;
int boxdim; // boxdim is the number of cells along each side of the box of length L
double cutoff2; int numpairs_p;
cutoff2 = 4; // squared cutoff distance; the cutoff must satisfy cutoff < L/boxdim
boxdim = (int)(L/cutoff2)*a; //(int)(L/cutoff2*0.8);
printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim);
struct box b[boxdim][boxdim][boxdim];
struct box *bp;
struct box *neigh_bp;
// box indices
int idx, idy, idz, index, box2, ib2;
int neigh_idx, neigh_idy, neigh_idz;
// allocate implied linked list
int p1, p2, j, i;
double d2, dx, dy, dz, s;
box2 = boxdim*boxdim;
//*****************************************END initialisations***********************************
if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim))
{
printf("interactions: bad input parameters\n");
// return 1;
}
double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0;
for (int step=0; step<INTERVAL_LEN; step++)
{
// Calculation of interaction per time step
t0 = time_in_seconds();
// allocate memory for particles in each box
// #pragma omp parallel for schedule(static) private(idx, idy, idz, ib2) shared(b, boxdim, box2)
// for (index=0; index<boxdim*box2; index++){
// idz = index%(boxdim);
// ib2 = index%(box2);
// idx = (index-ib2)/(box2);
// idy = (ib2-idz)/boxdim;
// b[idx][idy][idz].head=-1;
// }
for (idx=0; idx<boxdim; idx++){
for (idy=0; idy<boxdim; idy++){
for (idz=0; idz<boxdim; idz++){
b[idx][idy][idz].head=-1;
}
}
}
t_init_cells += time_in_seconds()-t0;
t0 = time_in_seconds();
// traverse all particles and assign to boxes
#pragma omp parallel for schedule(static) private(i, idx, idy, idz, bp) shared(b, next) num_threads(NTHREADS)
for (i=0; i<npos; i++)
{
// wrap each coordinate into [0, L) (periodic boundaries); since the particles move slowly, subtracting a single L would also suffice
if (pos_orig[3*i] >= 0) { pos[3*i] = fmod(pos_orig[3*i], L); }
else { // pos_orig[3*i] is negative
pos[3*i] = L - fmod(-1*pos_orig[3*i], L);
}
if (pos_orig[3*i+1] >= 0) { pos[3*i+1] = fmod(pos_orig[3*i+1], L); }
else { // pos_orig[3*i+1] is negative
pos[3*i+1] = L - fmod(-1*pos_orig[3*i+1], L);
}
if (pos_orig[3*i+2] >= 0) { pos[3*i+2] = fmod(pos_orig[3*i+2], L); }
else { // pos_orig[3*i+2] is negative
pos[3*i+2] = L - fmod(-1*pos_orig[3*i+2], L);
}
if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);}
// initialize entry of implied linked list
next[i] = -1;
forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0; // re-initialising interaction forces at each time step
// which box does the particle belong to?
// assumes particles have positions within [0,L]^3
idx = (int)(pos[3*i ]/L*boxdim);
idy = (int)(pos[3*i+1]/L*boxdim);
idz = (int)(pos[3*i+2]/L*boxdim);
// add to beginning of implied linked list
bp = &b[idx][idy][idz];
// the list push is a read-modify-write on bp->head, so it must be serialized; without the critical section, concurrent inserts from different threads race and can drop particles
#pragma omp critical
{
next[i] = bp->head; // next = previous (my notation)
bp->head = i; // head = latest (my notation)
}
}
t_assign_to_cells += time_in_seconds()-t0;
t0 = time_in_seconds();
#pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f, idx, idy, idz, ib2, bp) shared(b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, box2) num_threads(NTHREADS)
for (index=0; index<boxdim*box2; index++){
idz = index%(boxdim);
ib2 = index%(box2);
idx = (index-ib2)/(box2);
idy = (ib2-idz)/boxdim;
bp = &b[idx][idy][idz];
// interactions within and other boxes
// note: this inner "parallel for" is nested inside the outer one; unless nested parallelism is enabled it runs with a single thread per team
#pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f) shared(bp, b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, idx, idy, idz)// num_threads(NTHREADS)
for (j=0; j<NUM_BOX_NEIGHBORS; j++)
{
neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim;
neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim;
neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim;
neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz];
// when using boxes, the minimum image computation is
// known beforehand, thus we can compute position offsets
// to compensate for wraparound when computing distances
double xoffset = 0.;
double yoffset = 0.;
double zoffset = 0.;
if (idx + box_neighbors[j][0] == -1) xoffset = -L;
if (idy + box_neighbors[j][1] == -1) yoffset = -L;
if (idz + box_neighbors[j][2] == -1) zoffset = -L;
if (idx + box_neighbors[j][0] == boxdim) xoffset = L;
if (idy + box_neighbors[j][1] == boxdim) yoffset = L;
if (idz + box_neighbors[j][2] == boxdim) zoffset = L;
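// Example: with box_neighbors[j][0] == -1 and idx == 0, the neighbor wraps
// to boxdim-1 and xoffset == -L shifts its coordinates so distances are
// measured across the periodic boundary rather than across the whole box.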
// NOTE: modifying the function to update the forces
p1 = neigh_bp->head;
while (p1 != -1)
{
p2 = bp->head;
while (p2 != -1)
{
// compute distance vector
dx = pos[3*p1+0] - pos[3*p2+0] + xoffset;
dy = pos[3*p1+1] - pos[3*p2+1] + yoffset;
dz = pos[3*p1+2] - pos[3*p2+2] + zoffset;
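// my_EPS keeps d2 strictly positive, so the divisions by s below cannot hit a zero distance for coincident particles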
d2 = dx*dx+dy*dy+dz*dz+my_EPS;
if ( d2<4.0*a_sq)
{
s = sqrt(d2);
f = krepul*(2*a-s);
#pragma omp atomic
forces[3*p1+0] += f*dx/s;
#pragma omp atomic
forces[3*p1+1] += f*dy/s;
#pragma omp atomic
forces[3*p1+2] += f*dz/s;
#pragma omp atomic
forces[3*p2+0] -= f*dx/s;
#pragma omp atomic
forces[3*p2+1] -= f*dy/s;
#pragma omp atomic
forces[3*p2+2] -= f*dz/s;
}
p2 = next[p2];
}
p1 = next[p1];
}
}
}
t_force += time_in_seconds() - t0;
t0 = time_in_seconds();
// generate 3*npos samples from the standard normal distribution into buf
// note: this MKL VSL routine is sequential but vectorized; 'stream' is assumed to be a VSL stream initialized elsewhere
vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.);
// update positions with Brownian displacements
#pragma omp parallel for schedule(static) shared(pos_orig) private(i) num_threads(NTHREADS)
for (int i=0; i<3*npos; i++)
{
pos_orig[i] += forces[i]*DELTAT+f_const*buf[i];
}
t_update_pos += time_in_seconds() - t0;
}
printf("--------------------------------------------------------\n");
printf("Time: %f for initiating the cell head \n", t_init_cells);
printf("Time: %f for assigning particles to cells \n", t_assign_to_cells);
printf("Time: %f for force calculations \n", t_force);
printf("Time: %f for pos update \n", t_update_pos);
printf("--------------------------------------------------------\n");
return 0;
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
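// For instance, the StmtNodes.inc expansion turns STMT(CompoundStmt, Stmt)
// into the enumerator CompoundStmtClass, plus first/last range constants for
// each abstract base via STMT_RANGE.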
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
/// This bit is set only for the Stmts that are the structured-block of
/// OpenMP executable directives. Directives that have a structured block
/// are called "non-standalone" directives.
/// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
unsigned IsOMPStructuredBlock : 1;
};
enum { NumStmtBits = 9 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g.:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = NumStmtBits + 9 };
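// i.e. 9 Stmt bits + 9 Expr bits = 18 bits are consumed before any
// expression subclass adds its own bitfields.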
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, whether the trail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64, the BitWidth of the trail-allocated
/// integer. 7 bits because that is the minimal number of bits needed to
/// represent a value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, whether the ASTContext will run the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated tokens this string is made of.
/// This is the number of trailing SourceLocations.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 3;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
unsigned Kind : 2;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// values of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 3;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= 4 bytes to "
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= 4 bytes to "
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
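/// For illustration (hypothetical array): given Stmt *Args[2] holding only
/// Expr pointers, ExprIterator I(Args) dereferences via cast_or_null<Expr>,
/// so *I yields an Expr* instead of a raw Stmt*.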
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
StmtBits.IsOMPStructuredBlock = false;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
}
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
body_begin()[size() - 1] = S;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which are optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
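//
// For example (illustrative only), "case 1 ... 5: stmt;" stores
// [LHS][RHS][SubStmt][EllipsisLoc], while a plain "case 1: stmt;" stores
// just [LHS][SubStmt].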
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
SourceLocation SwitchCase::getEndLoc() const {
if (const auto *CS = dyn_cast<CaseStmt>(this))
return CS->getEndLoc();
else if (const auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getEndLoc();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
Stmt *SwitchCase::getSubStmt() {
if (auto *CS = dyn_cast<CaseStmt>(this))
return CS->getSubStmt();
else if (auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getSubStmt();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
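// Illustrative only (GNU statement expression, hypothetical code): in
//   int y = ({ int x = f(); x + 1; });
// the expression statement 'x + 1;' is the last non-null statement of the
// statement expression, and is the ValueStmt that determines its value.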
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
/// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which are optional.
// Note that it would be more convenient to put the optional trailing
// objects at the end, but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
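//
// For illustration (hypothetical C++17 code): an if of the form
//   if (auto V = lookup(); V != nullptr) { use(V); } else { report(); }
// uses the init slot for 'auto V = lookup();', the mandatory slots for the
// condition and the then statement, the else slot for the else statement,
// and the trailing SourceLocation for the location of the 'else' keyword.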
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
private llvm::TrailingObjects<SwitchStmt, Stmt *> {
friend TrailingObjects;
/// Points to a linked list of case and default statements.
SwitchCase *FirstCase;
// SwitchStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more convenient to
// put the optional trailing objects at the end but this would change
// the order in children().
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
/// Build a switch statement.
SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);
/// Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
/// Create a switch statement.
static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
Expr *Cond);
/// Create an empty switch statement optionally with storage for
/// an init expression and a condition variable.
static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
bool HasVar);
/// True if this SwitchStmt has storage for an init statement.
bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
/// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This switch statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
/// Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<SwitchStmt *>(this)->getConditionVariable();
}
/// Set the condition variable in this switch statement.
/// The switch statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
setBody(S);
setSwitchLoc(SL);
}
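/// Prepend a case/default statement to this switch's intrusive list. Note
/// that because each case is prepended, getSwitchCaseList() visits the cases
/// in reverse order of insertion.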
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() &&
"case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return SwitchStmtBits.AllEnumCasesCovered;
}
SourceLocation getBeginLoc() const { return getSwitchLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody() ? getBody()->getEndLoc()
: reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
private llvm::TrailingObjects<WhileStmt, Stmt *> {
friend TrailingObjects;
// WhileStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more
// convenient to put the optional trailing object at the end
// but this would affect children().
// The trailing objects are in order:
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
//
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasVarStorage();
}
/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
SourceLocation WL);
/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
Stmt *Body, SourceLocation WL);
/// Create an empty while statement optionally with storage for
/// a condition variable.
static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
/// True if this WhileStmt has storage for a condition variable.
bool hasVarStorage() const { return WhileStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
/// Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<WhileStmt *>(this)->getConditionVariable();
}
/// Set the condition variable of this while statement.
/// The while statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}
/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getForLoc(); }
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
setGotoLoc(GL);
}
/// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const { return getLabelLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target + 1); }
const_child_range children() const {
return const_child_range(&Target, &Target + 1);
}
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you cannot
/// infer the presence or absence of a return argument from the function's
/// declared return type.
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
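///
/// For illustration (hypothetical code):
/// \code
/// Widget make() {
///   Widget W; // W may be the NRVO candidate for 'return W;'
///   return W;
/// }
/// \endcode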
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
if (RetExpr)
return const_child_range(&RetExpr, &RetExpr + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
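// For illustration (assumed constraint strings): with output constraints
// "=r" and "+r", isOutputPlusConstraint(0) is false,
// isOutputPlusConstraint(1) is true, and getNumPlusOperands() below would
// return 1.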
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
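// Illustrative only (GNU 'asm goto', hypothetical code):
//   asm goto ("jmp %l0" : /* no outputs */ : /* no inputs */ : : err);
// produces a GCCAsmStmt with one label, so isAsmGoto() is true and
// getNumLabels() is 1.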
unsigned getNumLabels() const {
return NumLabels;
}
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns nullptr if the corresponding handler kind is not present.
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma-annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variables captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the outlined function declaration and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
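// Storage-layout note (derived from the accessors in this class): the object
// is immediately followed in memory by NumCaptures capture-initializer Expr
// pointers, and then a single Stmt pointer for the captured statement itself.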
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
GB_unaryop__minv_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int32_int64
// op(A') function: GB_tran__minv_int32_int64
// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
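// Illustrative expansion (exposition only; this text is not emitted by the
// code generator): for a single entry p, GB_CAST_OP (p, p) expands to
//
//      int64_t aij = Ax [p] ;                  // GB_GETA
//      int32_t z = (int32_t) aij ;             // GB_CASTING
//      Cx [p] = GB_IMINV_SIGNED (z, 32) ;      // GB_OP into GB_CX (p)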
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int32_int64
(
int32_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
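// Call sketch (illustrative): apply the operator to anz entries with 4
// threads; per the signature above, Cx and Ax may be aliased.
//
//      GrB_Info info = GB_unop__minv_int32_int64 (Cx, Ax, anz, 4) ;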
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrixstrassen.h | // @file matrixstrassen.h matrix strassen operations.
// @author TPOC: contact@palisade-crypto.org
//
// @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. THIS SOFTWARE IS
// PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H
#define LBCRYPTO_MATH_MATRIXSTRASSEN_H
#include <assert.h>
#include <memory>
#include <vector>
#include "math/matrix.h"
namespace lbcrypto {
template <class Element>
class MatrixStrassen { // FIXME : public Serializable {
public:
typedef vector<vector<Element>> data_t;
typedef vector<Element> lineardata_t;
typedef typename vector<Element>::iterator it_lineardata_t;
typedef std::function<Element(void)> alloc_func;
/**
* Constructor that initializes matrix values using a zero allocator
*
* @param &allocZero lambda function for zero initialization.
* @param &rows number of rows.
* @param &cols number of columns.
*/
MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols)
: data(), rows(rows), cols(cols), allocZero(allocZero) {
data.resize(rows);
for (auto row = data.begin(); row != data.end(); ++row) {
for (size_t col = 0; col < cols; ++col) {
row->push_back(allocZero());
}
}
}
/**
* Constructor that initializes matrix values using a distribution generation
* allocator
*
* @param &allocZero lambda function for zero initialization (used for
* initializing derived matrix objects)
* @param &rows number of rows.
* @param &cols number of columns.
* @param &allocGen lambda function for initialization using a distribution
* generator.
*/
MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols,
alloc_func allocGen);
/**
* Constructor of an empty matrix; SetSize must be called on this matrix
* before it can be used. This constructor exists primarily to support
* deserialization.
*
* @param &allocZero lambda function for zero initialization.
*/
explicit MatrixStrassen(alloc_func allocZero)
: data(), rows(0), cols(0), allocZero(allocZero) {}
void SetSize(size_t rows, size_t cols) {
if (this->rows != 0 || this->cols != 0) {
PALISADE_THROW(not_available_error,
"You cannot SetSize on a non-empty matrix");
}
this->rows = rows;
this->cols = cols;
data.resize(rows);
for (auto row = data.begin(); row != data.end(); ++row) {
for (size_t col = 0; col < cols; ++col) {
row->push_back(allocZero());
}
}
}
/**
* Copy constructor
*
* @param &other the matrix object to be copied
*/
MatrixStrassen(const MatrixStrassen<Element>& other)
: data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
deepCopyData(other.data);
}
/**
* Assignment operator
*
* @param &other the matrix object whose values are to be copied
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& operator=(
const MatrixStrassen<Element>& other);
/**
* In-place change of the current matrix to a matrix of all ones
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Ones();
/**
* Fill matrix using the same element
*
* @param &val the element the matrix is filled by
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Fill(const Element& val);
/**
* In-place change of the current matrix to Identity matrix
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& Identity();
/**
* Sets the first row to be powers of two
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element> GadgetVector(int32_t base = 2) const;
/**
* Computes the infinity norm
*
* @return the norm in double format
*/
inline double Norm() const;
/**
* Operator for matrix multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
inline MatrixStrassen<Element> operator*(
MatrixStrassen<Element> const& other) const {
return Mult(other);
}
/**
* Multiplication of matrix by a scalar
*
* @param &other the multiplier element
* @return the result of multiplication
*/
inline MatrixStrassen<Element> ScalarMult(Element const& other) const {
MatrixStrassen<Element> result(*this);
#pragma omp parallel for
for (size_t col = 0; col < result.cols; ++col) {
for (size_t row = 0; row < result.rows; ++row) {
result.data[row][col] = result.data[row][col] * other;
}
}
return result;
}
/**
* Operator for scalar multiplication
*
* @param &other the multiplier element
* @return the result of multiplication
*/
inline MatrixStrassen<Element> operator*(Element const& other) const {
return ScalarMult(other);
}
/**
* Equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool Equal(MatrixStrassen<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
return false;
}
for (size_t i = 0; i < rows; ++i) {
for (size_t j = 0; j < cols; ++j) {
if (data[i][j] != other.data[i][j]) {
return false;
}
}
}
return true;
}
/**
* Operator for equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool operator==(MatrixStrassen<Element> const& other) const {
return Equal(other);
}
/**
* Operator for non-equality check
*
* @param &other the matrix object to compare to
* @return the boolean result
*/
inline bool operator!=(MatrixStrassen<Element> const& other) const {
return !Equal(other);
}
/**
* Get property to access the data as a vector of vectors
*
* @return the data as vector of vectors
*/
const data_t& GetData() const { return data; }
/**
* Get property to access the number of rows in the matrix
*
* @return the number of rows
*/
size_t GetRows() const { return rows; }
/**
* Get property to access the number of columns in the matrix
*
* @return the number of columns
*/
size_t GetCols() const { return cols; }
/**
* Get property to access the zero allocator for the matrix
*
* @return the lambda function corresponding to the element zero allocator
*/
alloc_func GetAllocator() const { return allocZero; }
/**
* Sets the evaluation or coefficient representation for all ring elements
* that support the SetFormat method
*
* @param &format the enum value corresponding to coefficient or evaluation
* representation
*/
void SetFormat(Format format);
/**
* MatrixStrassen addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Add(
MatrixStrassen<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
PALISADE_THROW(math_error,
"Addition operands have incompatible dimensions");
}
MatrixStrassen<Element> result(*this);
#pragma omp parallel for
for (size_t j = 0; j < cols; ++j) {
for (size_t i = 0; i < rows; ++i) {
result.data[i][j] += other.data[i][j];
}
}
return result;
}
/**
* Operator for matrix addition
*
* @param &other the matrix to be added
* @return the resulting matrix
*/
inline MatrixStrassen<Element> operator+(
MatrixStrassen<Element> const& other) const {
return this->Add(other);
}
/**
* Operator for in-place addition
*
* @param &other the matrix to be added
* @return the resulting matrix (same object)
*/
inline MatrixStrassen<Element>& operator+=(
MatrixStrassen<Element> const& other);
/**
* MatrixStrassen subtraction
*
* @param &other the matrix to be subtracted
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Sub(
MatrixStrassen<Element> const& other) const {
if (rows != other.rows || cols != other.cols) {
PALISADE_THROW(math_error,
"Subtraction operands have incompatible dimensions");
}
MatrixStrassen<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
for (size_t j = 0; j < cols; ++j) {
for (size_t i = 0; i < rows; ++i) {
result.data[i][j] = data[i][j] - other.data[i][j];
}
}
return result;
}
/**
* Operator for matrix subtraction
*
* @param &other the matrix to be subtracted
* @return the resulting matrix
*/
inline MatrixStrassen<Element> operator-(
MatrixStrassen<Element> const& other) const {
return this->Sub(other);
}
/**
* Operator for in-place matrix subtraction
*
* @param &other the matrix to be subtracted
* @return the resulting matrix (same object)
*/
inline MatrixStrassen<Element>& operator-=(
MatrixStrassen<Element> const& other);
/**
* MatrixStrassen transposition
*
* @return the resulting matrix
*/
inline MatrixStrassen<Element> Transpose() const;
// YSP The signature of this method needs to be changed in the future
/**
* MatrixStrassen determinant - found using Laplace formula with complexity
* O(d!), where d is the dimension
*
* @param *result where the result is stored
*/
inline void Determinant(Element* result) const;
/**
* Cofactor matrix - the matrix of determinants of the minors A_{ij}
* multiplied by -1^{i+j}
*
* @return the cofactor matrix for the given matrix
*/
inline MatrixStrassen<Element> CofactorMatrixStrassen() const;
/**
* Add rows to the bottom of the matrix
*
* @param &other the matrix to be added to the bottom of current matrix
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other);
/**
* Add columns to the right of the matrix
*
* @param &other the matrix to be added to the right of current matrix
* @return the resulting matrix
*/
inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other);
/**
* MatrixStrassen indexing operator - writeable instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
inline Element& operator()(size_t row, size_t col) { return data[row][col]; }
/**
* MatrixStrassen indexing operator - read-only instance of the element
*
* @param &row row index
* @param &col column index
* @return the element at the index
*/
inline Element const& operator()(size_t row, size_t col) const {
return data[row][col];
}
/**
* MatrixStrassen row extractor
*
* @param &row row index
* @return the row at the index
*/
inline MatrixStrassen<Element> ExtractRow(size_t row) const {
MatrixStrassen<Element> result(this->allocZero, 1, this->cols);
int i = 0;
for (auto elem = this->GetData()[row].begin();
elem != this->GetData()[row].end(); ++elem) {
result(0, i) = **elem;
i++;
}
return result;
// return *this;
}
/**
* Call switch format for each (ring) element
*
*/
inline void SwitchFormat();
/**
* MatrixStrassen multiplication
*
* @param &other the multiplier matrix
* @return the result of multiplication
*/
MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other,
int nrec = 0, int pad = -1) const;
/*
* Multiply the matrix by a vector whose elements are all 1's. This causes
* the elements of each row of the matrix to be added and placed into the
* corresponding position in the output vector.
*/
MatrixStrassen<Element> MultByUnityVector() const;
/*
* Multiply the matrix by a vector of random 1's and 0's, which is the same as
* adding select elements in each row together. Return a vector that is a rows
* x 1 matrix.
*/
MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;
private:
struct MatDescriptor {
int lda;
int nrec;
int nproc;
int nprocr;
int nprocc;
int nproc_summa;
int bs;
};
const int DESC_SIZE = 7; // number of ints that make up a MatDescriptor
const int rank = 0, base = 0;
mutable data_t data;
size_t rows;
mutable int rowpad = 0;
size_t cols;
mutable int colpad = 0;
alloc_func allocZero;
mutable char* pattern = nullptr;
mutable int numAdd = 0;
mutable int numMult = 0;
mutable int numSub = 0;
mutable MatDescriptor desc;
mutable Element zeroUniquePtr = allocZero();
mutable int NUM_THREADS = 1;
void multiplyInternalCAPS(it_lineardata_t A, it_lineardata_t B,
it_lineardata_t C, MatDescriptor desc,
it_lineardata_t work) const;
void strassenDFSCAPS(it_lineardata_t A, it_lineardata_t B, it_lineardata_t C,
MatDescriptor desc,
it_lineardata_t workPassThrough) const;
void block_multiplyCAPS(it_lineardata_t A, it_lineardata_t B,
it_lineardata_t C, MatDescriptor d,
it_lineardata_t workPassThrough) const;
void LinearizeDataCAPS(lineardata_t* lineardataPtr) const;
void UnlinearizeDataCAPS(lineardata_t* lineardataPtr) const;
int getRank() const;
void verifyDescriptor(MatDescriptor desc);
long long numEntriesPerProc(MatDescriptor desc) const;
// deep copy of data - used for copy constructor
void deepCopyData(data_t const& src);
void getData(const data_t& Adata, const data_t& Bdata, const data_t& Cdata,
int row, int inner, int col) const;
void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A,
it_lineardata_t B) const;
void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A,
it_lineardata_t B) const;
void addMatricesCAPS(int numEntries, it_lineardata_t C, it_lineardata_t A,
it_lineardata_t B) const;
void addSubMatricesCAPS(int numEntries, it_lineardata_t T1,
it_lineardata_t S11, it_lineardata_t S12,
it_lineardata_t T2, it_lineardata_t S21,
it_lineardata_t S22) const;
void subMatricesCAPS(int numEntries, it_lineardata_t C, it_lineardata_t A,
it_lineardata_t B) const;
void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1,
it_lineardata_t S11, it_lineardata_t S12,
it_lineardata_t T2, it_lineardata_t S21,
it_lineardata_t S22, it_lineardata_t T3,
it_lineardata_t S31, it_lineardata_t S32) const;
void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1,
it_lineardata_t S11, it_lineardata_t S12,
it_lineardata_t T2, it_lineardata_t S21,
it_lineardata_t S22, it_lineardata_t T3,
it_lineardata_t S31, it_lineardata_t S32) const;
void distributeFrom1ProcCAPS(MatDescriptor desc, it_lineardata_t O,
it_lineardata_t I) const;
void collectTo1ProcCAPS(MatDescriptor desc, it_lineardata_t O,
it_lineardata_t I) const;
void sendBlockCAPS(int rank, int target, it_lineardata_t O, int bs,
int source, it_lineardata_t I, int ldi) const;
void receiveBlockCAPS(int rank, int target, it_lineardata_t O, int bs,
int source, it_lineardata_t I, int ldo) const;
void distributeFrom1ProcRecCAPS(MatDescriptor desc, it_lineardata_t O,
it_lineardata_t I, int ldi) const;
void collectTo1ProcRecCAPS(MatDescriptor desc, it_lineardata_t O,
it_lineardata_t I, int ldo) const;
};
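/**
* Usage sketch (illustrative only; assumes an Element type with the usual
* arithmetic operators, here plain int64_t):
*
*   MatrixStrassen<int64_t> A([]() { return int64_t(0); }, 2, 2);
*   MatrixStrassen<int64_t> B(A.GetAllocator(), 2, 2);
*   A.Identity();                       // A = I
*   B.Fill(3);                          // B = all threes
*   MatrixStrassen<int64_t> C = A * B;  // dispatches to Strassen Mult()
*/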
/**
* Operator for scalar multiplication of matrix
*
* @param &e element
* @param &M matrix
* @return the resulting matrix
*/
template <class Element>
inline MatrixStrassen<Element> operator*(Element const& e,
MatrixStrassen<Element> const& M) {
return M.ScalarMult(e);
}
/**
* Generates a matrix of rotations. See pages 7-8 of
* https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);
/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of
* https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigVector> RotateVecResult(
MatrixStrassen<Poly> const& inMat);
/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template <class Element>
inline std::ostream& operator<<(std::ostream& os,
const MatrixStrassen<Element>& m);
/**
* Gives the Cholesky decomposition of the input matrix.
* The assumption is that the covariance matrix does not have large
* coefficients because it is formed by discrete Gaussians e and s; this
* implies int32_t can be used. This algorithm can be further improved - see
* section 4.4 of the Darmstadt paper, http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be
* computed
* @return the resulting matrix of floating-point numbers
*/
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t>& input);
/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(
const MatrixStrassen<BigInteger>& input, const BigInteger& modulus);
/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(
const MatrixStrassen<BigVector>& input, const BigInteger& modulus);
/**
* Split a vector of int32_t into a vector of ring elements with ring dimension
* n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param &params Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(
MatrixStrassen<int32_t> const& other, size_t n,
const shared_ptr<ILParams> params);
/**
* Another method for splitting a vector of int32_t into a vector of ring
* elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param &params Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(
MatrixStrassen<int32_t> const& other, size_t n,
const shared_ptr<ILParams> params);
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_MATRIXSTRASSEN_H
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
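/*
  Usage sketch (illustrative client code, not part of MagickCore): the pair
  of calls below implements a basic trim, since CropImage() honors the x/y
  offsets carried in the returned bounds.

    ExceptionInfo *exception = AcquireExceptionInfo();
    RectangleInfo bounds = GetImageBoundingBox(image, exception);
    Image *trimmed = CropImage(image, &bounds, exception);
*/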
typedef struct _EdgeInfo
{
double
left,
right,
top,
bottom;
} EdgeInfo;
static double GetEdgeBackgroundFactor(const Image *image,
const CacheView *image_view,const GravityType gravity,const size_t width,
const size_t height,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
factor;
Image
*edge_image;
MagickPixelPacket
background,
pixel;
RectangleInfo
edge_geometry;
register const PixelPacket
*p;
ssize_t
y;
/*
Determine the percent of image background for this edge.
*/
switch (gravity)
{
case NorthWestGravity:
case NorthGravity:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
break;
}
case NorthEastGravity:
case EastGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
break;
}
case SouthEastGravity:
case SouthGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
break;
}
case SouthWestGravity:
case WestGravity:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
break;
}
}
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,p,(IndexPacket *) NULL,&background);
artifact=GetImageArtifact(image,"trim:background-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&background,exception);
edge_geometry.width=width;
edge_geometry.height=height;
edge_geometry.x=x_offset;
edge_geometry.y=y_offset;
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
return(0.0);
factor=0.0;
GetMagickPixelPacket(edge_image,&pixel);
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
SetMagickPixelPacket(edge_image,p,(IndexPacket *) NULL,&pixel);
if (IsMagickColorSimilar(&pixel,&background) == MagickFalse)
factor++;
p++;
}
}
factor/=((double) edge_image->columns*edge_image->rows);
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
return(factor);
}
static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge)
{
double
factor;
factor=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
edge->bottom);
return(factor);
}
static RectangleInfo GetEdgeBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
background_factor,
percent_background;
EdgeInfo
edge,
vertex;
Image
*edge_image;
RectangleInfo
bounds;
/*
Get the image bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
SetGeometry(image,&bounds);
edge_image=CloneImage(image,0,0,MagickTrue,exception);
if (edge_image == (Image *) NULL)
return(bounds);
(void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
memset(&vertex,0,sizeof(vertex));
edge_view=AcquireVirtualCacheView(edge_image,exception);
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity,
1,0,0,0,exception);
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity,
1,0,0,0,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity,
0,1,0,0,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity,
0,1,0,0,exception);
percent_background=1.0;
artifact=GetImageArtifact(edge_image,"trim:percent-background");
if (artifact != (const char *) NULL)
percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
1.0);
background_factor=GetMinEdgeBackgroundFactor(&edge);
for ( ; background_factor < percent_background;
background_factor=GetMinEdgeBackgroundFactor(&edge))
{
if ((bounds.width == 0) || (bounds.height == 0))
break;
if (fabs(edge.left-background_factor) < MagickEpsilon)
{
/*
Trim left edge.
*/
vertex.left++;
bounds.width--;
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.right-background_factor) < MagickEpsilon)
{
/*
Trim right edge.
*/
vertex.right++;
bounds.width--;
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.top-background_factor) < MagickEpsilon)
{
/*
Trim top edge.
*/
vertex.top++;
bounds.height--;
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
continue;
}
if (fabs(edge.bottom-background_factor) < MagickEpsilon)
{
/*
Trim bottom edge.
*/
vertex.bottom++;
bounds.height--;
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
bounds.x=(ssize_t) vertex.left;
bounds.y=(ssize_t) vertex.top;
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return(bounds);
}
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
MagickPixelPacket
target[3],
zero;
RectangleInfo
bounds;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"trim:percent-background");
if (artifact != (const char *) NULL)
return(GetEdgeBoundingBox(image,exception));
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetMagickPixelPacket(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[0]);
GetMagickPixelPacket(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[1]);
GetMagickPixelPacket(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[2]);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
RectangleInfo
bounding_box;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((x < bounding_box.x) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
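/*
  Usage sketch (illustrative): query the effective depth of the red channel
  alone, then of all composite channels.

    size_t red_depth = GetImageChannelDepth(image, RedChannel, exception);
    size_t depth = GetImageDepth(image, exception);
*/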
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
const ChannelType channel,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((channel & RedChannel) != 0)
if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if (1UL*QuantumRange <= MaxMap)
RestoreMSCWarning
{
size_t
*depth_map;
/*
Compute the pixel depth (optimized with a depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
if ((channel & RedChannel) != 0)
{
pixel=GetPixelRed(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & GreenChannel) != 0)
{
pixel=GetPixelGreen(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & BlueChannel) != 0)
{
pixel=GetPixelBlue(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
pixel=GetPixelOpacity(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel=GetPixelIndex(indexes+x);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
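/*
  Worked example (illustrative): an image whose depth is 12 is reported as
  16, and a depth of 48 as 64; with constrain set to MagickTrue the result
  is additionally capped at MAGICKCORE_QUANTUM_DEPTH.
*/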
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of the image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IsMonochromeImage(image,exception) != MagickFalse)
return(BilevelType);
if (IsGrayImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IsPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity
% is either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(image->type);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(UndefinedType);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelGray(p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) && (IsPixelMonochrome(p) == MagickFalse))
type=GrayscaleType;
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if ((type == GrayscaleType) && (image->matte != MagickFalse))
type=GrayscaleMatteType;
return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register ssize_t
x;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelMonochrome(p) == MagickFalse)
{
type=UndefinedType;
break;
}
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of the image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IdentifyImageMonochrome(image,exception) != MagickFalse)
return(BilevelType);
if (IdentifyImageGray(image,exception) != UndefinedType)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IdentifyPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
magick_unreferenced(exception);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if the type of the image is bi-level.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
magick_unreferenced(exception);
if (image->type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
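/*
  Usage sketch (illustrative): deactivate the alpha channel when it carries
  no information.

    if (IsOpaqueImage(image, exception) != MagickFalse)
      (void) SetImageAlphaChannel(image, DeactivateAlphaChannel);
*/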
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
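/*
  Usage sketch (illustrative): reduce the color channels to 8 significant
  bits while leaving any opacity data untouched.

    (void) SetImageChannelDepth(image, (ChannelType) (RedChannel |
      GreenChannel | BlueChannel), 8);
*/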
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth)
{
return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
const ChannelType channel,const size_t depth)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].red),range),range);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].green),range),range);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].blue),range),range);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].opacity),range),
range);
}
}
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if (1UL*QuantumRange <= MaxMap)
RestoreMSCWarning
{
Quantum
*depth_map;
register ssize_t
i;
/*
Scale pixels to the desired depth (optimized with a depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelRed(q)),range),range));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelGreen(q)),range),range));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelBlue(q)),range),range));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelOpacity(q)),range),range));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
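/*
A worked example of the scaling above (illustrative): for depth=4,
range=GetQuantumRange(4)=15.  With Q16 quanta, 65535 maps through
ScaleQuantumToAny(65535,15)=15 and back via ScaleAnyToQuantum(15,15)
to 65535, so the extremes survive while intermediate values snap to
one of 16 levels.
*/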
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
% PaletteBilevelMatteType, PaletteMatteType, TrueColorType,
% TrueColorMatteType, ColorSeparationType, ColorSeparationMatteType,
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
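/*
A minimal usage sketch (illustrative only, excluded from the build):
reduce an in-memory image to a 256-color palette.  ExampleMakePalette
is a hypothetical helper, not part of the library.
*/
#if 0
static MagickBooleanType ExampleMakePalette(Image *image)
{
  /* PaletteType quantizes to at most 256 colors and drops matte. */
  return(SetImageType(image,PaletteType));
}
#endif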
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
status=TransformImageColorspace(image,GRAYColorspace);
(void) NormalizeImage(image);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->dither_method=NoDitherMethod;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
image->matte=MagickFalse;
break;
}
case GrayscaleType:
{
status=TransformImageColorspace(image,GRAYColorspace);
image->matte=MagickFalse;
break;
}
case GrayscaleMatteType:
{
status=TransformImageColorspace(image,GRAYColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case PaletteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->matte=MagickFalse;
break;
}
case PaletteBilevelMatteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
(void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteMatteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case TrueColorMatteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case ColorSeparationType:
{
status=TransformImageColorspace(image,CMYKColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case ColorSeparationMatteType:
{
status=TransformImageColorspace(image,CMYKColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
if (status == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *);
typedef MagickBooleanType
DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if ((ssize_t) max - min < steps) \
max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
min = MagickMax(0, (ssize_t) max - steps)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
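/*
A quick sanity sketch (illustrative, excluded from the build): the
shift-or pattern in the C565 macros replicates the high bits into the
low bits, so the 5- and 6-bit channel maxima expand exactly to 255.
*/
#if 0
static void ExampleC565(void)
{
  /* 0xF81F is magenta in RGB565: r=31, g=0, b=31. */
  assert(C565_red(0xF81F) == 255);   /* 31<<3 | 31>>2 = 248 | 7 */
  assert(C565_green(0xF81F) == 0);
  assert(C565_blue(0xF81F) == 255);
}
#endif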
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *),
ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
const MagickBooleanType,ExceptionInfo *),
ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
const MagickBooleanType,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
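/*
Four-color mode: two interpolated colors at 2/3 and 1/3 between the
endpoints (DXT1 when c0 > c1, and always for DXT3/DXT5).
*/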
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
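/*
Three-color mode (c0 <= c1): one midpoint color plus a transparent
black entry; a[3] = 255 is an opacity value (fully transparent) as
consumed by SetPixelOpacity in SetDXT1Pixels.
*/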
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
size_t
dist;
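/*
Unsigned wrap-around when codes[j] > value is harmless: squaring the
wrapped difference yields the square of the absolute difference for
magnitudes below 2^32.
*/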
dist = value - (size_t)codes[j];
dist *= dist;
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
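/*
Cluster fit (sketch of the search below): points are ordered along the
principal axis, then every split into at most four contiguous groups
(palette weights 0, 1/3, 2/3, 1 toward the end point) is tried; for
each split the least-squares endpoints are solved in closed form and
the lowest metric-weighted error wins.  At most 8 reorderings along
the refined axis are attempted.
*/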
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
static void CompressRangeFit(const size_t count,
const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
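/*
Range fit: project every point onto the principal axis, take the
extremes as endpoints, snap them to the RGB565 grid, and assign each
point to the nearest of the four derived codebook entries.
*/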
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
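/*
Power iteration: repeatedly multiply v by the symmetric 3x3 covariance
matrix and renormalize by the largest component; after 8 rounds v
approximates the dominant eigenvector, i.e. the principal axis of the
color cloud.
*/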
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if (total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
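/*
Illustrative only (excluded from the build): decoding a DDS file via
the public MagickCore API; "texture.dds" and ExampleReadDDS are
placeholders, not part of the library.
*/
#if 0
static Image *ExampleReadDDS(ExceptionInfo *exception)
{
  Image
    *image;
  ImageInfo
    *image_info;
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"texture.dds",
    MagickPathExtent);
  image=ReadImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  return(image);
}
#endif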
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
const char
*option;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
Image
*image;
MagickBooleanType
status,
cubemap,
volume,
read_mipmaps;
PixelTrait
alpha_trait;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cubemap=MagickFalse;
volume=MagickFalse;
read_mipmaps=MagickFalse;
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
alpha_trait = BlendPixelTrait;
decoder = ReadUncompressedRGBA;
}
else
{
alpha_trait = UndefinedPixelTrait;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Luminance with an alpha channel is not supported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
alpha_trait = UndefinedPixelTrait;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
alpha_trait = UndefinedPixelTrait;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
alpha_trait = BlendPixelTrait;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
alpha_trait = BlendPixelTrait;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
if ((num_images == 0) || (num_images > GetBlobSize(image)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
option=GetImageOption(image_info,"dds:skip-mipmaps");
if (IsStringFalse(option) != MagickFalse)
read_mipmaps=MagickTrue;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
/* Start a new image */
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->alpha_trait=alpha_trait;
image->compression=compression;
image->columns=dds_info.width;
image->rows=dds_info.height;
image->storage_class=DirectClass;
image->endian=LSBEndian;
image->depth=8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
(void) SetImageBackgroundColor(image,exception);
status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
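/*
Header layout recap (byte offsets, little-endian): 0 magic "DDS ",
4 size (124), 8 flags, 12 height, 16 width, 20 pitch/linear size,
24 depth, 28 mipmap count, 32-75 reserved, 76 pixel-format size (32),
80 pf flags, 84 fourcc, 88 rgb bit count, 92/96/100/104 r/g/b/a
masks, 108 caps1, 112 caps2, 116-127 reserved; pixel data starts at
128, which is why ReadDDSImage seeks there.
*/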
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
DDSColors colors,size_t bits,Quantum *q)
{
register ssize_t
i;
ssize_t
j;
unsigned char
code;
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) image->columns &&
(y + j) < (ssize_t) image->rows)
{
code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
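/*
Three-color mode produced a transparent texel but the image has no
alpha trait yet; return MagickFalse so the caller can enable alpha
and re-decode this block (see ReadDXT1Pixels).
*/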
if ((colors.a[code] != 0) &&
(image->alpha_trait == UndefinedPixelTrait))
return(MagickFalse);
q+=GetPixelChannels(image);
}
}
}
return(MagickTrue);
}
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
MagickBooleanType
status;
/*
Only read mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
status=MagickTrue;
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
register ssize_t
i;
size_t
h,
w;
w=DIV2(dds_info->width);
h=DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return(MagickFalse);
image=SyncNextImageInList(image);
status=SetImageExtent(image,w,h,exception);
if (status == MagickFalse)
break;
status=decoder(image,dds_info,exception);
if (status == MagickFalse)
break;
if ((w == 1) && (h == 1))
break;
w=DIV2(w);
h=DIV2(h);
}
}
return(status);
}
static MagickBooleanType ReadDXT1Pixels(Image *image,
DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
DDSColors
colors;
register Quantum
*q;
register ssize_t
x;
size_t
bits;
ssize_t
y;
unsigned short
c0,
c1;
magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
MagickMin(4,image->rows-y),exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
/* Read 8 bytes of data from the image */
c0=ReadBlobLSBShort(image);
c1=ReadBlobLSBShort(image);
bits=ReadBlobLSBLong(image);
CalculateColors(c0,c1,&colors,MagickFalse);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
/* Write the pixels */
if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
{
/* Correct alpha */
SetImageAlpha(image,QuantumRange,exception);
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
MagickMin(4,image->rows-y),exception);
if (q != (Quantum *) NULL)
SetDXT1Pixels(image,x,y,colors,bits,q);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
}
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,8,exception));
}
static MagickBooleanType ReadDXT3Pixels(Image *image,
DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
DDSColors
colors;
register Quantum
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
ssize_t
j,
y;
unsigned short
c0,
c1;
magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
MagickMin(4, image->rows - y),exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
/* Read alpha values (8 bytes) */
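/* DXT3 stores explicit 4-bit alpha, one nibble per texel in row-major
order: a0 holds rows 0-1 and a1 holds rows 2-3 of the 4x4 block */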
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
q+=GetPixelChannels(image);
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
}
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadDXT5Pixels(Image *image,
DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
DDSColors
colors;
MagickSizeType
alpha_bits;
register Quantum
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
ssize_t
j,
y;
unsigned short
c0,
c1;
magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
MagickMin(4, image->rows - y),exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
/* Read alpha values (8 bytes) */
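/* DXT5 interpolated alpha: two 8-bit endpoints (a0, a1) followed by 48
bits of 3-bit selectors. When a0 > a1 the six remaining codes
interpolate between the endpoints; otherwise four codes interpolate and
codes 6 and 7 mean fully transparent and fully opaque */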
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) image->columns &&
(y + j) < (ssize_t) image->rows)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
/* Extract alpha value */
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
q+=GetPixelChannels(image);
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
}
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
DDSInfo *dds_info,ExceptionInfo *exception)
{
register Quantum
*q;
ssize_t
x, y;
unsigned short
color;
for (y = 0; y < (ssize_t) image->rows; y++)
{
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
}
else
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType,exception);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
exception));
else
return(SkipRGBMipmaps(image,dds_info,3,exception));
}
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
DDSInfo *dds_info,ExceptionInfo *exception)
{
register Quantum
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
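/* Supported 16-bit layouts, as tested below: ARGB1555 (1-bit alpha),
8-bit luminance with 8-bit alpha, and ARGB4444 (4-bit alpha) */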
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleAlphaType,exception);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) image->rows; y++)
{
q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
for (x = 0; x < (ssize_t) image->columns; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
}
else if (alphaBits == 2)
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
(color >> 8)),q);
SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
}
else
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
}
}
else
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return(MagickFalse);
if (EOFBlob(image) != MagickFalse)
return(MagickFalse);
}
return(MagickTrue);
}
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
exception));
else
return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
int texel_size,ExceptionInfo *exception)
{
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
w=DIV2(dds_info->width);
h=DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w=DIV2(w);
h=DIV2(h);
if ((w == 1) && (h == 1))
break;
}
}
return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
int pixel_size,ExceptionInfo *exception)
{
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
w=DIV2(dds_info->width);
h=DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset=(MagickOffsetType)w*h*pixel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w=DIV2(w);
h=DIV2(h);
if ((w == 1) && (h == 1))
break;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
static void WriteCompressed(Image *image, const size_t count,
DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if ((clusterFit == MagickFalse) || (count == 0))
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image, ExceptionInfo *exception)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
fromlist,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace,exception);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (image->alpha_trait == UndefinedPixelTrait)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (IsStringTrue(option) != MagickFalse)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (IsStringTrue(option) != MagickFalse)
weightByAlpha=MagickTrue;
}
}
}
mipmaps=0;
fromlist=MagickFalse;
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
{
if (LocaleNCompare(option,"fromlist",8) == 0)
{
Image
*next;
fromlist=MagickTrue;
next=image->next;
while(next != (Image *) NULL)
{
mipmaps++;
next=next->next;
}
}
}
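/*
For power-of-two dimensions, compute the length of the full mipmap chain;
e.g. a 256x128 image yields 8 mipmap levels (128x64 down to 1x1).
*/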
if ((mipmaps == 0) &&
((image->columns & (image->columns - 1)) == 0) &&
((image->rows & (image->rows - 1)) == 0))
{
maxMipmaps=SIZE_MAX;
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
exception);
if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MagickPathExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (format == DDPF_FOURCC)
flags=flags | DDSD_LINEARSIZE;
else
flags=flags | DDSD_PITCH;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (pixelFormat == DDPF_FOURCC)
{
/* Compressed DDS requires linear compressed size of first image */
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
else /* DXT5 */
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
}
else
{
/* Uncompressed DDS requires byte pitch of first image */
if (image->alpha_trait != UndefinedPixelTrait)
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
else
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
}
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) memset(software,0,sizeof(software));
(void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for(i=0;i < 5;i++) /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
register const Quantum
*p;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const Quantum *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
else
alpha = 255;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
alphas[4*by + bx] = (size_t)alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p+=GetPixelChannels(image);
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression,const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value, const size_t limit)
{
ssize_t
result = (ssize_t) (value + 0.5f);
if (result < 0)
return(0);
if ((size_t) result > limit)
return(limit);
return((size_t) result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char *indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
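/* DXT1 selects four-color mode only when the first encoded endpoint is
greater than the second; if a < b, swap the endpoints and flip the low
selector bit (0<->1, 2<->3) so the decoded colors are unchanged */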
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
const size_t pixelFormat,const size_t compression,const size_t mipmaps,
const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
const char
*option;
Image
*mipmap_image,
*resize_image;
MagickBooleanType
fast_mipmaps,
status;
register ssize_t
i;
size_t
columns,
rows;
columns=DIV2(image->columns);
rows=DIV2(image->rows);
option=GetImageOption(image_info,"dds:fast-mipmaps");
fast_mipmaps=IsStringTrue(option);
mipmap_image=image;
resize_image=image;
status=MagickTrue;
for (i=0; i < (ssize_t) mipmaps; i++)
{
if (fromlist == MagickFalse)
{
mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
exception);
if (mipmap_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
}
else
{
mipmap_image=mipmap_image->next;
if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
image->filename);
}
DestroyBlob(mipmap_image);
mipmap_image->blob=ReferenceBlob(image->blob);
WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
weightByAlpha,exception);
if (fromlist == MagickFalse)
{
if (fast_mipmaps == MagickFalse)
mipmap_image=DestroyImage(mipmap_image);
else
{
if (resize_image != image)
resize_image=DestroyImage(resize_image);
resize_image=mipmap_image;
}
}
columns=DIV2(columns);
rows=DIV2(rows);
}
if (resize_image != image)
resize_image=DestroyImage(resize_image);
return(status);
}
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
const ssize_t *map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const Quantum
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p)));
if (image->alpha_trait != UndefinedPixelTrait)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p)));
p+=GetPixelChannels(image);
}
}
}
|
yescrypt-simd.c | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256_Y.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform.c"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
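/* Without XOP's rotate intrinsic, XOR in the rotated sum via a pair of
shifts: rotl(T, s) == (T << s) | (T >> (32 - s)) */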
#define ARX(out, in1, in2, s) \
{ \
__m128i T = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
\
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3).
*/
#define SALSA20_8_BASE(maybe_decl, out) \
{ \
maybe_decl Y0 = X0; \
maybe_decl Y1 = X1; \
maybe_decl Y2 = X2; \
maybe_decl Y3 = X3; \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
}
#define SALSA20_8(out) \
SALSA20_8_BASE(__m128i, out)
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
*/
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
X0 = _mm_xor_si128(X0, Z0); \
X1 = _mm_xor_si128(X1, Z1); \
X2 = _mm_xor_si128(X2, Z2); \
X3 = _mm_xor_si128(X3, Z3); \
SALSA20_8_BASE(maybe_decl, out)
#define SALSA20_8_XOR_MEM(in, out) \
SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
typedef union {
uint32_t w[16];
__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
salsa20_blk_t *restrict Bout, size_t r)
{
__m128i X0, X1, X2, X3;
size_t i;
r--;
PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
X0 = Bin[r * 2 + 1].q[0];
X1 = Bin[r * 2 + 1].q[1];
X2 = Bin[r * 2 + 1].q[2];
X3 = Bin[r * 2 + 1].q[3];
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
* (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
* starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
* destination registers, whereas the shifts would require an extra move
* instruction for our code when building without AVX. Unfortunately, PSHUFD
* is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
* and somewhat slower on some non-Intel CPUs (luckily not including AMD
* Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
* win in terms of throughput and/or not needing a move instruction, we
* currently use it despite the higher latency on some older CPUs. As an
* alternative, the #if below may be patched to only enable use of (V)PSHUFD
* when building with SSE4.1 or newer, which is not available on older CPUs
* where this instruction has higher latency.
*/
#if 1
#define HI32(X) \
_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
_mm_srli_si128((X), 4)
#else
#define HI32(X) \
_mm_srli_epi64((X), 32)
#endif
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
* intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)
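/* With the defaults above (S_BITS = 8): S_SIZE1 = 256, S_MASK = 0xff0,
and S_SIZE_ALL = 2 * 256 * 2 * 8 = 8192 bytes of S-box data */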
#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
x = EXTRACT64(X) & S_MASK2; \
s0 = *(const __m128i *)(S0 + (uint32_t)x); \
s1 = *(const __m128i *)(S1 + (x >> 32)); \
X = _mm_mul_epu32(HI32(X), X); \
X = _mm_add_epi64(X, s0); \
X = _mm_xor_si128(X, s1);
#endif
#define PWXFORM_ROUND \
PWXFORM_SIMD(X0, x0, s00, s01) \
PWXFORM_SIMD(X1, x1, s10, s11) \
PWXFORM_SIMD(X2, x2, s20, s21) \
PWXFORM_SIMD(X3, x3, s30, s31)
#define PWXFORM \
{ \
PWXFORM_X_T x0, x1, x2, x3; \
__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
PWXFORM_ROUND PWXFORM_ROUND \
}
#define XOR4(in) \
X0 = _mm_xor_si128(X0, (in)[0]); \
X1 = _mm_xor_si128(X1, (in)[1]); \
X2 = _mm_xor_si128(X2, (in)[2]); \
X3 = _mm_xor_si128(X3, (in)[3]);
#define OUT(out) \
(out)[0] = X0; \
(out)[1] = X1; \
(out)[2] = X2; \
(out)[3] = X3;
/**
* blockmix_pwxform(Bin, Bout, r, S):
* Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
* be 128r bytes in length; the output Bout must also be the same size.
*/
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S) {
blockmix_salsa8(Bin, Bout, r);
return;
}
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
PREFETCH(&Bin[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
/* X <-- B_{r1 - 1} */
X0 = Bin[r].q[0];
X1 = Bin[r].q[1];
X2 = Bin[r].q[2];
X3 = Bin[r].q[3];
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
}
#define XOR4_2(in1, in2) \
X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
X3 = _mm_xor_si128((in1)[3], (in2)[3]);
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM)
{
__m128i X0, X1, X2, X3;
size_t i;
r--;
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
} else {
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
}
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q)
SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q)
SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q)
SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3;
size_t i;
if (!S)
return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
if (Bin2_in_ROM) {
PREFETCH(&Bin2[r], _MM_HINT_NTA)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_NTA)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
} else {
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
/* X <-- H'(X \xor B_i) */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q)
XOR4(Bin2[i].q)
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
#undef XOR4
#define XOR4(in, out) \
(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r)
{
__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
size_t i;
r--;
PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
}
PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)
/* 1: X <-- B_{2r - 1} */
XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[0].q, Bin2[0].q)
SALSA20_8_XOR_REG(Bout[0].q)
/* 2: for i = 0 to 2r - 1 do */
for (i = 0; i < r;) {
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
SALSA20_8_XOR_REG(Bout[r + 1 + i].q)
i++;
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
SALSA20_8_XOR_REG(Bout[i].q)
}
/* 3: X <-- H(X \xor B_i) */
/* 4: Y_i <-- X */
/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)
return _mm_cvtsi128_si32(X0);
}
#define XOR4_Y \
X0 = _mm_xor_si128(X0, Y0); \
X1 = _mm_xor_si128(X1, Y1); \
X2 = _mm_xor_si128(X2, Y2); \
X3 = _mm_xor_si128(X3, Y3);
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
size_t r, const __m128i *restrict S)
{
const uint8_t * S0, * S1;
__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
size_t i;
if (!S)
return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);
S0 = (const uint8_t *)S;
S1 = (const uint8_t *)S + S_SIZE_ALL / 2;
/* Convert 128-byte blocks to 64-byte blocks */
r *= 2;
r--;
PREFETCH(&Bin2[r], _MM_HINT_T0)
PREFETCH(&Bin1[r], _MM_HINT_T0)
for (i = 0; i < r; i++) {
PREFETCH(&Bin2[i], _MM_HINT_T0)
PREFETCH(&Bin1[i], _MM_HINT_T0)
PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
}
PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
/* X <-- B_{r1 - 1} */
XOR4_2(Bin1[r].q, Bin2[r].q)
/* for i = 0 to r1 - 1 do */
for (i = 0; i < r; i++) {
XOR4(Bin1[i].q, Bin2[i].q)
/* X <-- H'(X \xor B_i) */
XOR4_Y
PWXFORM
/* B'_i <-- X */
OUT(Bout[i].q)
}
/* Last iteration of the loop above */
XOR4(Bin1[i].q, Bin2[i].q)
XOR4_Y
PWXFORM
/* B'_i <-- H(B'_i) */
SALSA20_8(Bout[i].q)
return _mm_cvtsi128_si32(X0);
}
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_X_T
#undef PWXFORM_SIMD
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2
#undef XOR4_Y
/**
* integerify(B, r):
* Return the result of parsing B_{2r-1} as a little-endian integer.
*/
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
return B[2 * r - 1].w[0];
}
/**
* smix1(B, r, N, flags, V, NROM, shared, XY, S):
* Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 128r bytes in length. The value N must be even and no
* smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
* arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
* bytes as well saves cache lines, but might result in cache bank conflicts).
*/
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
const salsa20_blk_t * VROM = shared->shared1.aligned;
uint32_t VROM_mask = shared->mask1;
size_t s = 2 * r;
salsa20_blk_t * X = V, * Y;
uint32_t i, j;
size_t k;
/* 1: X <-- B */
/* 3: V_i <-- X */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
}
}
if (NROM && (VROM_mask & 1)) {
uint32_t n;
salsa20_blk_t * V_n;
const salsa20_blk_t * V_j;
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[s];
blockmix(X, Y, r, S);
X = &V[2 * s];
if ((1 & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j = integerify(Y, r) & (NROM - 1);
V_j = &VROM[j * s];
/* X <-- H(X \xor VROM_j) */
j = blockmix_xor(Y, V_j, X, r, 1, S);
} else {
/* X <-- H(X) */
blockmix(Y, X, r, S);
j = integerify(X, r);
}
for (n = 2; n < N; n <<= 1) {
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
V_n = &V[n * s];
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < m; i += 2) {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V_n[i * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((n + i) & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i;
V_j = &V[j * s];
}
/* X <-- H(X \xor VROM_j) */
X = &V_n[(i + 1) * s];
j = blockmix_xor(Y, V_j, X, r, 1, S);
}
}
n >>= 1;
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[(N - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((N - 1) & VROM_mask) == 1) {
/* j <-- Integerify(X) mod NROM */
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
}
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
X = XY;
blockmix_xor(Y, V_j, X, r, 1, S);
} else if (flags & YESCRYPT_RW) {
uint32_t n;
salsa20_blk_t * V_n, * V_j;
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V[2 * s];
blockmix(Y, X, r, S);
j = integerify(X, r);
for (n = 2; n < N; n <<= 1) {
uint32_t m = (n < N / 2) ? n : (N - 1 - n);
V_n = &V[n * s];
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < m; i += 2) {
Y = &V_n[i * s];
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i - 1;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
j = blockmix_xor(X, V_j, Y, r, 0, S);
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += i;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V_n[(i + 1) * s];
j = blockmix_xor(Y, V_j, X, r, 0, S);
}
}
n >>= 1;
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 2 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[(N - 1) * s];
j = blockmix_xor(X, V_j, Y, r, 0, S);
/* j <-- Wrap(Integerify(X), i) */
j &= n - 1;
j += N - 1 - n;
V_j = &V[j * s];
/* X <-- X \xor V_j */
/* 4: X <-- H(X) */
X = XY;
blockmix_xor(Y, V_j, X, r, 0, S);
} else {
/* 2: for i = 0 to N - 1 do */
for (i = 1; i < N - 1; i += 2) {
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[i * s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
X = &V[(i + 1) * s];
blockmix(Y, X, r, S);
}
/* 4: X <-- H(X) */
/* 3: V_i <-- X */
Y = &V[i * s];
blockmix(X, Y, r, S);
/* 4: X <-- H(X) */
X = XY;
blockmix(Y, X, r, S);
}
/* B' <-- X */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
}
}
}
/**
* smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
* Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
* length; the temporary storage V must be 128rN bytes in length; the temporary
* storage XY must be 256r bytes in length. The value N must be a power of 2
* greater than 1. The value Nloop must be even. The array V must be aligned
* to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
* bytes (aligning them to 64 bytes as well saves cache lines, but might result
* in cache bank conflicts).
*/
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
const salsa20_blk_t * VROM = shared->shared1.aligned;
uint32_t VROM_mask = shared->mask1;
size_t s = 2 * r;
salsa20_blk_t * X = XY, * Y = &XY[s];
uint64_t i;
uint32_t j;
size_t k;
if (Nloop == 0)
return;
/* X <-- B' */
/* 3: V_i <-- X */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
}
}
i = Nloop / 2;
/* 7: j <-- Integerify(X) mod N */
j = integerify(X, r) & (N - 1);
/*
* Normally, NROM implies YESCRYPT_RW, but we check for these separately
* because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
* operating on the entire V.
*/
if (NROM && (flags & YESCRYPT_RW)) {
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < Nloop; i += 2) {
salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor_save(X, V_j, Y, r, S);
if (((i + 1) & VROM_mask) == 1) {
const salsa20_blk_t * VROM_j;
j &= NROM - 1;
VROM_j = &VROM[j * s];
/* X <-- H(X \xor VROM_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, VROM_j, X, r, 1, S);
} else {
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor_save(Y, V_j, X, r, S);
}
j &= N - 1;
V_j = &V[j * s];
}
} else if (NROM) {
/* 6: for i = 0 to N - 1 do */
for (i = 0; i < Nloop; i += 2) {
const salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* j <-- Integerify(X) mod NROM */
j = blockmix_xor(X, V_j, Y, r, 0, S);
if (((i + 1) & VROM_mask) == 1) {
j &= NROM - 1;
V_j = &VROM[j * s];
} else {
j &= N - 1;
V_j = &V[j * s];
}
/* X <-- H(X \xor VROM_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, V_j, X, r, 1, S);
j &= N - 1;
V_j = &V[j * s];
}
} else if (flags & YESCRYPT_RW) {
/* 6: for i = 0 to N - 1 do */
do {
salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor_save(X, V_j, Y, r, S);
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* V_j <-- Xprev \xor V_j */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor_save(Y, V_j, X, r, S);
j &= N - 1;
} while (--i);
} else {
/* 6: for i = 0 to N - 1 do */
do {
const salsa20_blk_t * V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(X, V_j, Y, r, 0, S);
j &= N - 1;
V_j = &V[j * s];
/* 8: X <-- H(X \xor V_j) */
/* 7: j <-- Integerify(X) mod N */
j = blockmix_xor(Y, V_j, X, r, 0, S);
j &= N - 1;
} while (--i);
}
/* 10: B' <-- X */
for (k = 0; k < 2 * r; k++) {
for (i = 0; i < 16; i++) {
le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
}
}
}
/**
* p2floor(x):
* Largest power of 2 not greater than argument.
*/
static uint64_t
p2floor(uint64_t x)
{
uint64_t y;
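/* x & (x - 1) clears the lowest set bit; looping until only one bit
remains yields the highest power of 2, e.g. p2floor(12) == 8 */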
while ((y = x & (x - 1)))
x = y;
return x;
}
/**
* smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
* Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
* temporary storage V must be 128rN bytes in length; the temporary storage XY
* must be 256r or 256rp bytes in length (the larger size is required with
* OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
* The array V must be aligned to a multiple of 64 bytes, and arrays B and
* XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
* saves cache lines and helps avoid false sharing in OpenMP-enabled builds
* when p > 1, but it might also result in cache bank conflicts).
*/
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
yescrypt_flags_t flags,
salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
salsa20_blk_t * XY, void * S)
{
size_t s = 2 * r;
uint32_t Nchunk = N / p;
uint64_t Nloop_all, Nloop_rw;
uint32_t i;
Nloop_all = Nchunk;
if (flags & YESCRYPT_RW) {
if (t <= 1) {
if (t)
Nloop_all *= 2; /* 2/3 */
Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
} else {
Nloop_all *= t - 1;
}
} else if (t) {
if (t == 1)
Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
Nloop_all *= t;
}
Nloop_rw = 0;
if (flags & __YESCRYPT_INIT_SHARED)
Nloop_rw = Nloop_all;
else if (flags & YESCRYPT_RW)
Nloop_rw = Nloop_all / p;
Nchunk &= ~(uint32_t)1; /* round down to even */
Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
Nloop_rw &= ~(uint64_t)1; /* round down to even */
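/* Worked example: with YESCRYPT_RW and t = 0, Nchunk = 1024 gives
Nloop_all = (1024 + 2) / 3 = 342, which is already even, so the
round-up above leaves it unchanged */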
#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
{
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint32_t Vchunk = i * Nchunk;
uint8_t * Bp = &B[128 * r * i];
salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
if (Sp)
smix1(Bp, 1, S_SIZE_ALL / 128,
flags & ~YESCRYPT_PWXFORM,
Sp, NROM, shared, XYp, NULL);
if (!(flags & __YESCRYPT_INIT_SHARED_2))
smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
NROM, shared, XYp, Sp);
}
if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
for (i = 0; i < p; i++) {
uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
salsa20_blk_t * XYp = XY;
#endif
void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
smix2(Bp, r, N, Nloop_all - Nloop_rw,
flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
}
}
#ifdef _OPENMP
}
#endif
}
/**
* yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
* N, r, p, t, flags, buf, buflen):
* Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
* p, buflen), or a revision of scrypt as requested by flags and shared, and
* write the result into buf. The parameters r, p, and buflen must satisfy
* r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
* of 2 greater than 1. (This optimized implementation currently additionally
* limits N to the range from 8 to 2^31, but other implementations might not.)
*
* t controls computation time while not affecting peak memory usage. shared
* and flags may request special modes as described in yescrypt.h. local is
* the thread-local data structure, which allows a memory allocation to be
* preserved and reused across calls, thereby reducing its overhead.
*
* Return 0 on success; or -1 on error.
*/
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
const uint8_t * passwd, size_t passwdlen,
const uint8_t * salt, size_t saltlen,
uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
uint8_t * buf, size_t buflen)
{
yescrypt_region_t tmp;
uint64_t NROM;
size_t B_size, V_size, XY_size, need;
uint8_t * B, * S;
salsa20_blk_t * V, * XY;
uint8_t sha256[32];
/*
* YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
* so don't let it have side-effects. Without this adjustment, it'd
* enable the SHA-256 password pre-hashing and output post-hashing,
* because any deviation from classic scrypt implies those.
*/
if (p == 1)
flags &= ~YESCRYPT_PARALLEL_SMIX;
/* Sanity-check parameters */
if (flags & ~YESCRYPT_KNOWN_FLAGS) {
errno = EINVAL;
return -1;
}
#if SIZE_MAX > UINT32_MAX
if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
errno = EFBIG;
return -1;
}
#endif
if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
errno = EFBIG;
return -1;
}
if (N > UINT32_MAX) {
errno = EFBIG;
return -1;
}
if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
errno = EINVAL;
return -1;
}
if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
errno = EINVAL;
return -1;
}
if ((r > SIZE_MAX / 256 / p) ||
(N > SIZE_MAX / 128 / r)) {
errno = ENOMEM;
return -1;
}
#ifdef _OPENMP
if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
(N > SIZE_MAX / 128 / (r * p))) {
errno = ENOMEM;
return -1;
}
#endif
if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
(flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
p > SIZE_MAX / S_SIZE_ALL) {
errno = ENOMEM;
return -1;
}
NROM = 0;
if (shared->shared1.aligned) {
NROM = shared->shared1.aligned_size / ((size_t)128 * r);
if (NROM > UINT32_MAX) {
errno = EFBIG;
return -1;
}
if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
!(flags & YESCRYPT_RW)) {
errno = EINVAL;
return -1;
}
}
/* Allocate memory */
V = NULL;
V_size = (size_t)128 * r * N;
#ifdef _OPENMP
if (!(flags & YESCRYPT_PARALLEL_SMIX))
V_size *= p;
#endif
need = V_size;
if (flags & __YESCRYPT_INIT_SHARED) {
if (local->aligned_size < need) {
if (local->base || local->aligned ||
local->base_size || local->aligned_size) {
errno = EINVAL;
return -1;
}
if (!alloc_region(local, need))
return -1;
}
V = (salsa20_blk_t *)local->aligned;
need = 0;
}
B_size = (size_t)128 * r * p;
need += B_size;
if (need < B_size) {
errno = ENOMEM;
return -1;
}
XY_size = (size_t)256 * r;
#ifdef _OPENMP
XY_size *= p;
#endif
need += XY_size;
if (need < XY_size) {
errno = ENOMEM;
return -1;
}
if (flags & YESCRYPT_PWXFORM) {
size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
S_size *= p;
#else
if (flags & YESCRYPT_PARALLEL_SMIX)
S_size *= p;
#endif
need += S_size;
if (need < S_size) {
errno = ENOMEM;
return -1;
}
}
if (flags & __YESCRYPT_INIT_SHARED) {
if (!alloc_region(&tmp, need))
return -1;
B = (uint8_t *)tmp.aligned;
XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
} else {
init_region(&tmp);
if (local->aligned_size < need) {
if (free_region(local))
return -1;
if (!alloc_region(local, need))
return -1;
}
B = (uint8_t *)local->aligned;
V = (salsa20_blk_t *)((uint8_t *)B + B_size);
XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
}
S = NULL;
if (flags & YESCRYPT_PWXFORM)
S = (uint8_t *)XY + XY_size;
if (t || flags) {
SHA256_CTX_Y ctx;
SHA256_Init_Y(&ctx);
SHA256_Update_Y(&ctx, passwd, passwdlen);
SHA256_Final_Y(sha256, &ctx);
passwd = sha256;
passwdlen = sizeof(sha256);
}
/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);
if (t || flags)
memcpy(sha256, B, sizeof(sha256));
if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
} else {
uint32_t i;
/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
for (i = 0; i < p; i++) {
/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
&V[(size_t)2 * r * i * N],
NROM, shared,
&XY[(size_t)4 * r * i],
S ? &S[S_SIZE_ALL * i] : S);
#else
smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
NROM, shared, XY, S);
#endif
}
}
/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);
/*
* Except when computing classic scrypt, allow all computation so far
* to be performed on the client. The final steps below match those of
* SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
* far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
* SCRAM's use of SHA-1) would be usable with yescrypt hashes.
*/
if ((t || flags) && buflen == sizeof(sha256)) {
/* Compute ClientKey */
{
HMAC_SHA256_CTX_Y ctx;
HMAC_SHA256_Init_Y(&ctx, buf, buflen);
#if 0
/* Proper yescrypt */
HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
#else
/* GlobalBoost-Y buggy yescrypt */
HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
#endif
HMAC_SHA256_Final_Y(sha256, &ctx);
}
/* Compute StoredKey */
{
SHA256_CTX_Y ctx;
SHA256_Init_Y(&ctx);
SHA256_Update_Y(&ctx, sha256, sizeof(sha256));
SHA256_Final_Y(buf, &ctx);
}
}
if (free_region(&tmp))
return -1;
/* Success! */
return 0;
}
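/*
 * Hedged usage sketch (not part of the original source): calling
 * yescrypt_kdf with zero-initialized shared/local regions and
 * classic-scrypt-compatible parameters. This assumes the usual
 * yescrypt convention that a zeroed yescrypt_local_t is a valid
 * "empty" region (the code above alloc_region()s it on demand) and
 * that a zeroed yescrypt_shared_t means "no ROM". The helper name
 * is hypothetical.
 */
#if 0
#include <string.h>
static int example_kdf(uint8_t dk[32])
{
	yescrypt_shared_t shared;
	yescrypt_local_t local;
	memset(&shared, 0, sizeof(shared));
	memset(&local, 0, sizeof(local));
	/* N=2048, r=8, p=1, t=0, flags=0 -> classic scrypt */
	return yescrypt_kdf(&shared, &local,
	    (const uint8_t *)"password", 8,
	    (const uint8_t *)"salt", 4,
	    2048, 8, 1, 0, 0, dk, 32);
}
#endif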
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h> /* for omp_get_max_threads() used below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
/* Arbitrary defaults (annotation: the original left these uninitialized
 * when too few command-line arguments were given). */
int Nx = 102, Ny = 102, Nz = 102, Nt = 100;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
/* Initialize all planes, including the index-0 boundary that the
 * stencil reads (the original started at 1, leaving it uninitialized). */
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
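/* Annotation (not in the original): this kernel is deliberately serial
 * ("serial execution" above); a straightforward OpenMP version would
 * parallelize the outer space loop of each time step, e.g.:
 *
 *   for (t = 0; t < Nt-1; t++) {
 *     #pragma omp parallel for private(j,k)
 *     for (i = 1; i < Nz-1; i++) { ... same j/k loops ... }
 *   }
 *
 * Only the time loop carries a dependence; the space loops within one
 * step are independent. */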
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
2.c | /*
Write a program that defines two parallel regions whose execution depends on the
conditional clause #pragma omp parallel if(...): if the given number of threads is
greater than 1, the parallel region is executed, otherwise it is not. Set the number
of threads to 3 before the first region and to 1 before the second. Inside the
parallel regions, determine the number of threads and the id of each thread, and
print the results. Verify that the program behaves correctly.
*/
#include <stdio.h>
#include <omp.h>
int main(int argc, char *argv[])
{
omp_set_dynamic(0);
omp_set_num_threads(3);
#pragma omp parallel if (omp_get_max_threads() > 1)
{
if (omp_in_parallel())
printf("Нить номер %d, Количество нитей: %d\n", omp_get_thread_num(), omp_get_num_threads());
}
omp_set_num_threads(1);
#pragma omp parallel if (omp_get_max_threads() > 1)
{
if (omp_in_parallel())
printf("Нить номер %d, Количество нитей: %d\n", omp_get_thread_num(), omp_get_num_threads());
}
return 0;
}
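/* Expected behavior (annotation, not in the original): the first region
 * runs with 3 threads (dynamic adjustment is disabled), so three lines
 * are printed; before the second region the thread count is set to 1,
 * the if() clause is false, the region runs serially, omp_in_parallel()
 * returns 0, and nothing is printed. */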
|
ompfor8.c | extern double f (double, double);
void foo(int nthreads, int size, int numiter, double *V, int totalSize)
{
int i, iter;
for(iter=0; iter<numiter; iter++) {
/* i starts at 1: V[i-1] would read out of bounds at i = 0. Note that the
 * loop still carries a dependence on V[i-1], so this plain parallel for
 * is unsafe as written; see the doacross sketch below. */
#pragma omp parallel for default(none) shared(V,totalSize) private(i) schedule(static) ordered
for (i=1; i<totalSize-1; i++) {
V[i] = f(V[i],V[i-1]);
}
}
}
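/* A hedged sketch (not in the original): the same update expressed as an
 * OpenMP 4.5 doacross loop, which preserves the V[i-1] -> V[i] dependence
 * while still distributing iterations across the team. The function name
 * is hypothetical; whether this pays off depends on the cost of f(). */
void foo_doacross(int numiter, double *V, int totalSize)
{
int i, iter;
for(iter=0; iter<numiter; iter++) {
#pragma omp parallel for ordered(1) schedule(static)
for (i=1; i<totalSize-1; i++) {
#pragma omp ordered depend(sink: i-1)
V[i] = f(V[i],V[i-1]);
#pragma omp ordered depend(source)
}
}
}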
|
GB_binop__bclr_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int64)
// C=scalar+B GB (_bind1st__bclr_int64)
// C=scalar+B' GB (_bind1st_tran__bclr_int64)
// C=A+scalar GB (_bind2nd__bclr_int64)
// C=A'+scalar GB (_bind2nd_tran__bclr_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITCLR (x, y, int64_t, 64) ;
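// Annotation (not in the generated source): a plain-C sketch of the
// bit-clear semantics assumed here -- the GxB bitwise operators follow
// a MATLAB-style 1-based bit index convention (our assumption), with
// out-of-range indices leaving x unchanged:
#if 0
static inline int64_t demo_bitclr_int64 (int64_t x, int64_t k)
{
    // clear bit k (1-based) of x; no-op if k is out of range
    return ((k >= 1 && k <= 64) ? (x & ~(((int64_t) 1) << (k-1))) : x) ;
}
#endif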
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bclr_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bclr_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bclr_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bclr_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bclr_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
/* Arbitrary defaults (annotation: the original left these uninitialized
 * when too few command-line arguments were given). */
int Nx = 102, Ny = 102, Nz = 102, Nt = 100;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
/* Initialize all planes, including the index-0 boundary that the
 * stencil reads (the original started at 1, leaving it uninitialized). */
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(4*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t3+Nx,2048),floord(Nt+Nx-4,2048)),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
helloMP2.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[]) {
int th_id, nthreads;
#pragma omp parallel private(th_id)
{
th_id = omp_get_thread_num();
printf("Hello World from thread %d\n", th_id);
#pragma omp barrier
if ( th_id == 0 ) {
nthreads = omp_get_num_threads();
printf("There are %d threads\n",nthreads);
}
}
return EXIT_SUCCESS;
}
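#if 0
/* Hedged alternative sketch (not part of the original): the same report
 * using "#pragma omp single", which picks one thread and ends with an
 * implicit barrier, so no explicit barrier or thread-id check is needed. */
int main (int argc, char *argv[]) {
#pragma omp parallel
{
printf("Hello World from thread %d\n", omp_get_thread_num());
#pragma omp single
printf("There are %d threads\n", omp_get_num_threads());
}
return EXIT_SUCCESS;
}
#endif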
|
GB_binop__second_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_bool)
// A.*B function (eWiseMult): GB (_AemultB_01__second_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__second_bool)
// A.*B function (eWiseMult): GB (_AemultB_03__second_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_bool)
// A*D function (colscale): GB (_AxD__second_bool)
// D*A function (rowscale): GB (_DxB__second_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__second_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__second_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = bij
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_BOOL || GxB_NO_SECOND_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__second_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__second_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__second_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
matrix_multiply.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
int multiply_parallel(int m, int n, int p, int a[][600], int b[][600], int c[][600])
{
int i,j,k;
#pragma omp parallel shared(a,b,c) private(i,j,k)
{
#pragma omp for
for (i=0; i<m; i=i+1)
{
for (j=0; j<n; j=j+1)
{
a[i][j]=0;
for (k=0; k<p; k=k+1)
{
a[i][j]=(a[i][j])+((b[i][k])*(c[k][j]));
}
}
}
}
return 0;
}
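/* Hedged variant (not part of the original): since the i and j loops are
 * independent, collapse(2) exposes m*n iterations to the team instead of
 * m, which can help when m is small relative to the thread count:
 *
 *   #pragma omp parallel for collapse(2) private(k)
 *   for (i=0; i<m; i=i+1)
 *     for (j=0; j<n; j=j+1) { ... same body ... }
 *
 * With collapse, i and j are iteration variables of the construct and
 * are made private automatically. */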
int multiply_serial(int m, int n, int p, int a[600][600], int b[][600], int c[][600])
{
int i, j, k ;
for (i=0; i<m; i++)
{
for (j=0; j<n; j++)
{
a[i][j]=0;
for (k=0; k<p; k++)
{
a[i][j] += b[i][k]*c[k][j] ;
}
}
}
return 0;
}
int main()
{
double start, end;
double serial_time, parallel_time;
int m, n, p, i, j;
m = 600;
n = 600;
p = 600;
int a[600][600];// = {{1, 2, 3, 4, 600}, {3, 4, 600, 6, 7}}; // Dimensions m x n
int b[600][600];// = {{600, 6, 7, 8, 9}, {7, 8, 9, 600, 3}}; // Dimensions n x p
int serial_mul[600][600], parallel_mul[600][600];
for (i=0;i<m;i++)
{
for (j=0;j<n;j++)
{
a[i][j] = rand();
}
}
for (i=0;i<n;i++)
{
for (j=0;j<p;j++)
{
b[i][j] = rand();
}
}
// Serial multiplication, timed with omp_get_wtime(): clock() measures
// CPU time summed over all threads, which would overstate the parallel
// version's runtime; wall-clock time is the fair comparison.
start = omp_get_wtime();
multiply_serial(m, n, p, serial_mul, a, b);
end = omp_get_wtime();
serial_time = end - start;
// Parallel Multiplication
start = omp_get_wtime();
multiply_parallel(m, n, p, parallel_mul, a, b);
end = omp_get_wtime();
parallel_time = end - start;
// Printing the matrices of dimensions m x p :
/*// Series Multiplication
printf("The resultant matrix when multiplied in series is :\n");
for (i=0;i<m;i++)
{
for (j=0;j<p;j++)
{
printf("%d ", serial_mul[i][j]);
}
printf("\n");
}
// Parallel Multiplication
printf("The resultant matrix when multiplied in parallel is :\n");
for (i=0;i<m;i++)
{
for (j=0;j<p;j++)
{
printf("%d ", parallel_mul[i][j]);
}
printf("\n");
}*/
// Print the performance measures.
printf("The Serial Algorithm execution time : %f\n", serial_time);
printf("The Parallel Algorithm execution time : %f\n", parallel_time);
if (serial_time > parallel_time)
{
printf("Therefore the parallel execution algorrithm is faster.\n");
}
else if (serial_time < parallel_time)
{
printf("Therefore the serial execution algorrithm is faster.\n");
}
else
{
printf("Both the algorithms perform equally.\n");
}
} |
simde-diagnostic.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
*/
/* SIMDe targets a very wide range of standards and compilers, and our
* goal is to compile cleanly even with extremely aggressive warnings
* (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.)
* treated as errors.
*
* While our preference is to resolve the underlying issue a given
* diagnostic is warning us about, sometimes that's not possible.
* Fixing a warning in one compiler may cause problems in another.
* Sometimes a warning doesn't really apply to us (false positives),
* and sometimes adhering to a warning would mean dropping a feature
* we *know* the compiler supports since we have tested specifically
* for the compiler or feature.
*
* When practical, warnings are only disabled for specific code. For
* a list of warnings which are enabled by default in all SIMDe code,
* see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS. Note that we restore the
* warning stack when SIMDe is done parsing, so code which includes
* SIMDe is not deprived of these warnings.
*/
#if !defined(SIMDE_DIAGNOSTIC_H)
#define SIMDE_DIAGNOSTIC_H
#include "hedley.h"
#include "simde-detect-clang.h"
/* This is only to help us implement functions like _mm_undefined_ps. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
#undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
#if HEDLEY_HAS_WARNING("-Wuninitialized")
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,2,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_PGI_VERSION_CHECK(19,10,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)")
#elif \
HEDLEY_TI_VERSION_CHECK(16,9,9) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ __pragma(warning(disable:4700))
#endif
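/* Typical use of these SIMDE_DIAGNOSTIC_DISABLE_* macros (annotation,
 * not in the original): they are placed between Hedley's diagnostic
 * push/pop so the suppression stays local, e.g.:
 *
 *   HEDLEY_DIAGNOSTIC_PUSH
 *   SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
 *   ... code that deliberately reads uninitialized storage ...
 *   HEDLEY_DIAGNOSTIC_POP
 *
 * HEDLEY_DIAGNOSTIC_PUSH/POP come from the hedley.h included above. */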
/* GCC emits a lot of "notes" about the ABI being different for things
* in newer versions of GCC. We don't really care because all our
* functions are inlined and don't generate ABI. */
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PSABI_
#endif
/* Since MMX uses x87 FP registers, you're supposed to call _mm_empty()
* after each MMX function before any floating point instructions.
* Some compilers warn about functions which use MMX functions but
 * don't call _mm_empty(). However, since SIMDe is implementing the
* MMX API we shouldn't be calling _mm_empty(); we leave it to the
* caller to invoke simde_mm_empty(). */
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)")
#elif defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_
#endif
/* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they
* emit a diagnostic if you use #pragma simd instead of
* #pragma omp simd. SIMDe supports OpenMP SIMD, you just need to
* compile with -qopenmp or -qopenmp-simd and define
* SIMDE_ENABLE_OPENMP. Cilk+ is just a fallback. */
#if HEDLEY_INTEL_VERSION_CHECK(18,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_
#endif
/* MSVC emits a diagnostic when we call a function (like
* simde_mm_set_epi32) while initializing a struct. We currently do
* this a *lot* in the tests. */
#if \
defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_
#endif
/* This warning needs a lot of work. It is triggered if all you do is
* pass the value to memcpy/__builtin_memcpy, or if you initialize a
* member of the union, even if that member takes up the entire union.
* Last tested with clang-10, hopefully things will improve in the
* future; if clang fixes this I'd love to enable it. */
#if \
HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
#define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_
#endif
/* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which
 * is false. However, SIMDe uses these operations exclusively
* for things like _mm_cmpeq_ps, for which we really do want to check
* for equality (or inequality).
*
* If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro
 * which just wraps a check in some code to disable this diagnostic I'd
* be happy to accept it. */
#if \
HEDLEY_HAS_WARNING("-Wfloat-equal") || \
HEDLEY_GCC_VERSION_CHECK(3,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#endif
/* This is because we use HEDLEY_STATIC_ASSERT for static assertions.
* If Hedley can't find an implementation it will preprocess to
* nothing, which means there will be a trailing semi-colon. */
#if HEDLEY_HAS_WARNING("-Wextra-semi")
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"")
#elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_
#endif
/* We do use a few variadic macros, which technically aren't available
* until C99 and C++11, but every compiler I'm aware of has supported
* them for much longer. That said, usage is isolated to the test
* suite and compilers known to support them. */
#if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0)
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \
_Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \
_Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"")
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_
#endif
/* emscripten requires us to use a __wasm_unimplemented_simd128__ macro
* before we can access certain SIMD intrinsics, but this diagnostic
* warns about it being a reserved name. It is a reserved name, but
* it's reserved for the compiler and we are using it to convey
* information to the compiler.
*
* This is also used when enabling native aliases since we don't get to
* choose the macro names. */
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_
#endif
/* Similar to above; types like simde__m128i are reserved due to the
* double underscore, but we didn't choose them, Intel did. */
#if HEDLEY_HAS_WARNING("-Wreserved-identifier")
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_ _Pragma("clang diagnostic ignored \"-Wreserved-identifier\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_
#endif
/* clang 3.8 warns about the packed attribute being unnecessary when
* used in the _mm_loadu_* functions. That *may* be true for version
* 3.8, but for later versions it is crucial in order to make unaligned
* access safe. */
#if HEDLEY_HAS_WARNING("-Wpacked")
#define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PACKED_
#endif
/* Triggered when assigning a float to a double implicitly. We use
* explicit casts in SIMDe, this is only used in the test suite. */
#if HEDLEY_HAS_WARNING("-Wdouble-promotion")
#define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_
#endif
/* Several compilers treat conformant array parameters as VLAs. We
* test to make sure we're in C mode (C++ doesn't support CAPs), and
* that the version of the standard supports CAPs. We also reject
* some buggy compilers like MSVC (the logic is in Hedley if you want
* to take a look), but with certain warnings enabled some compilers
* still like to emit a diagnostic. */
#if HEDLEY_HAS_WARNING("-Wvla")
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_
#endif
#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused")
#define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#endif
#if HEDLEY_HAS_WARNING("-Wpass-failed")
#define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_
#endif
#if HEDLEY_HAS_WARNING("-Wpadded")
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_
#endif
#if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant")
#define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_
#endif
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_
#endif
#if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_
#endif
/* clang will emit this warning when we use C99 extensions while not in
 * C99 mode, even though it supports them. In such cases we check the
 * compiler and version first, so we know it's not a problem. */
#if HEDLEY_HAS_WARNING("-Wc99-extensions")
#define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_
#endif
/* https://github.com/simd-everywhere/simde/issues/277 */
#if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_
#endif
/* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS
* to silence, but you have to do that before including anything and
* that would require reordering includes. */
#if defined(_MSC_VER)
#define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
#endif
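/* Minimal usage sketch (an assumption about intended use, not a
 * verbatim excerpt): the one-off disables above are meant to be
 * bracketed with Hedley's push/pop so they don't leak into user code. */
#if 0
#include <string.h>
static void copy_name(char *dst, const char *src) {
  HEDLEY_DIAGNOSTIC_PUSH
  SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
  strcpy(dst, src); /* MSVC C4996 would otherwise suggest strcpy_s */
  HEDLEY_DIAGNOSTIC_POP
}
#endif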
/* Some compilers, such as clang, may use `long long` for 64-bit
* integers, but `long long` triggers a diagnostic with
* -Wc++98-compat-pedantic which says 'long long' is incompatible with
* C++98. */
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
_Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") \
_Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#endif
/* Same problem as above */
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
#define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_
#endif
/* emscripten emits this whenever stdin/stdout/stderr is used in a
* macro. */
#if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion")
#define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_
#endif
/* Clang uses C11 generic selections to implement some AltiVec
* functions, which triggers this diagnostic when not compiling
* in C11 mode */
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
#define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc11-extensions\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_
#endif
/* Clang sometimes triggers this warning in macros in the AltiVec and
* NEON headers, or due to missing functions. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion")
#define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"")
/* For NEON, the situation with -Wvector-conversion in clang < 10 is
* bad enough that we just disable the warning altogether. On x86,
* clang has similar issues on several sse4.2+ intrinsics before 3.8. */
#if \
(defined(SIMDE_ARCH_ARM) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)) || \
SIMDE_DETECT_CLANG_VERSION_NOT(3,8,0)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#if !defined(SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_
#endif
/* Prior to 5.0, clang didn't support disabling diagnostics in
* statement exprs. As a result, some macros we use don't
* properly silence warnings. */
#if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") && HEDLEY_HAS_WARNING("-Wcast-align")
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") _Pragma("clang diagnostic ignored \"-Wcast-align\"")
#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual")
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-align")
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-align\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_
#endif
/* SLEEF triggers this a *lot* in their headers */
#if HEDLEY_HAS_WARNING("-Wignored-qualifiers")
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_
#endif
/* GCC emits this under some circumstances when using __int128 */
#if HEDLEY_GCC_VERSION_CHECK(4,8,0)
#define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ _Pragma("GCC diagnostic ignored \"-Wpedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_
#endif
/* MSVC doesn't like (__assume(0), code) and will warn about code being
* unreachable, but we want it there because not all compilers
* understand the unreachable macro and will complain if it is missing.
* I'm planning on adding a new macro to Hedley to handle this a bit
* more elegantly, but until then... */
#if defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ __pragma(warning(disable:4702))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_
#endif
/* This is a false positive from GCC in a few places. */
#if HEDLEY_GCC_VERSION_CHECK(4,7,0)
#define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif
#if defined(SIMDE_ENABLE_NATIVE_ALIASES)
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \
SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_
#else
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_
#endif
/* Some native functions on E2K with instruction set < v6 are declared
 * as deprecated due to inefficiency. Even so, they are more efficient
 * than the SIMDe implementation, so we use them and switch off the
 * deprecation warnings. */
#if defined(HEDLEY_MCST_LCC_VERSION)
# define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS _Pragma("diag_suppress 1215,1444")
# define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS _Pragma("diag_default 1215,1444")
#else
# define SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS
# define SIMDE_LCC_REVERT_DEPRECATED_WARNINGS
#endif
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \
HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION \
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \
SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \
SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \
SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \
SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \
SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \
SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \
SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \
SIMDE_DIAGNOSTIC_DISABLE_VLA_ \
SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \
SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \
SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ \
SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_
#endif /* !defined(SIMDE_DIAGNOSTIC_H) */
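/* Typical file-level usage sketch (an assumption based on the macros
 * above, not a verbatim excerpt): the combined disable is wrapped in
 * Hedley's push/pop around an entire implementation region. */
#if 0
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
/* ... intrinsic implementations that would otherwise trip warnings ... */
HEDLEY_DIAGNOSTIC_POP
#endif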
|
GB_binop__rdiv_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp32)
// A*D function (colscale): GB (_AxD__rdiv_fp32)
// D*A function (rowscale): GB (_DxB__rdiv_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp32)
// C=scalar+B GB (_bind1st__rdiv_fp32)
// C=scalar+B' GB (_bind1st_tran__rdiv_fp32)
// C=A+scalar GB (_bind2nd__rdiv_fp32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (bij / aij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y / x) ;
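// Worked example (illustrative only; not part of the generated file):
// rdiv flips its operands, so GB_BINOP (z, x, bij, i, j) expands to
// z = (bij / x). With x = 2 and bij = 8 that yields z = 4.
#if 0
static void rdiv_fp32_example (void)
{
float x = 2.0f, bij = 8.0f, z ;
GB_BINOP (z, x, bij, 0, 0) ;    // z = bij / x = 4.0f
}
#endif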
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rdiv_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (bij / x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (y / aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y / aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mod.c | #include <math.h>
#include "../sailfish.h"
// ============================ COMPAT ========================================
// ============================================================================
#ifdef __ROCM__
#include <hip/hip_runtime.h>
#endif
#if !defined(__NVCC__) && !defined(__ROCM__)
#define __device__
#define __host__
#define EXTERN_C
#else
#define EXTERN_C extern "C"
#endif
// ============================ PHYSICS =======================================
// ============================================================================
#define NCONS 4
#define PLM_THETA 1.5
#define GAMMA_LAW_INDEX (5.0 / 3.0)
// ============================ MATH ==========================================
// ============================================================================
#define real double
#define min2(a, b) ((a) < (b) ? (a) : (b))
#define max2(a, b) ((a) > (b) ? (a) : (b))
#define min3(a, b, c) min2(a, min2(b, c))
#define max3(a, b, c) max2(a, max2(b, c))
#define sign(x) copysign(1.0, x)
#define minabs(a, b, c) min3(fabs(a), fabs(b), fabs(c))
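// plm_gradient_scalar below is the generalized minmod (theta) limiter:
// g = minmod(theta * (y0 - yl), 0.5 * (yr - yl), theta * (yr - y0)).
// It returns the smallest slope in magnitude when all three candidates
// agree in sign and zero at a local extremum; PLM_THETA = 1.5 biases
// the reconstruction toward the centered slope.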
static __host__ __device__ real plm_gradient_scalar(real yl, real y0, real yr)
{
real a = (y0 - yl) * PLM_THETA;
real b = (yr - yl) * 0.5;
real c = (yr - y0) * PLM_THETA;
return 0.25 * fabs(sign(a) + sign(b)) * (sign(a) + sign(c)) * minabs(a, b, c);
}
static __host__ __device__ void plm_gradient(real *yl, real *y0, real *yr, real *g)
{
for (int q = 0; q < NCONS; ++q)
{
g[q] = plm_gradient_scalar(yl[q], y0[q], yr[q]);
}
}
// ============================ GRAVITY =======================================
// ============================================================================
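// Vertical scale height of the disk, H = cs / omega_tilde, with the
// isothermal sound speed cs = sqrt(pres / sigma) and omega_tilde^2 =
// sum_p M_p / r_p^3 the combined Keplerian frequency of all point
// masses (G = 1 in code units).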
static __host__ __device__ real disk_height(
struct PointMassList *mass_list,
real x1,
real y1,
real *prim)
{
real omegatilde2 = 0.0;
for (int p = 0; p < mass_list->count; ++p)
{
real x0 = mass_list->masses[p].x;
real y0 = mass_list->masses[p].y;
real mp = mass_list->masses[p].mass;
real dx = x1 - x0;
real dy = y1 - y0;
real r2 = dx * dx + dy * dy + 1e-12;
real r = sqrt(r2);
omegatilde2 += mp * pow(r, -3.0);
}
real sigma = prim[0];
real pres = prim[3];
return sqrt(pres / sigma) / sqrt(omegatilde2);
}
static __host__ __device__ void point_mass_source_term(
struct PointMass *mass,
real x1,
real y1,
real dt,
real *prim,
real h,
real *delta_cons,
int constant_softening)
{
real x0 = mass->x;
real y0 = mass->y;
real mp = mass->mass;
real rs = mass->radius;
real sigma = prim[0];
real pres = prim[3];
real gamma = GAMMA_LAW_INDEX;
real eps = pres / (gamma - 1.0) / sigma;
real dx = x1 - x0;
real dy = y1 - y0;
real r2 = dx * dx + dy * dy;
real softening_length = constant_softening ? rs : 0.5 * h;
real r2_soft = r2 + pow(softening_length, 2.0);
real dr = sqrt(r2);
real mag = sigma * mp * pow(r2_soft, -1.5);
real fx = -mag * dx;
real fy = -mag * dy;
real vx = prim[1];
real vy = prim[2];
real sink_rate = 0.0;
if (dr < 4.0 * rs)
{
sink_rate = mass->rate * exp(-pow(dr / rs, 4.0));
}
if (!constant_softening)
{
if (dr < rs)
{
real transition = pow(1.0 - pow(dr / rs, 2.0), 2.0);
real mod_rs = transition * rs + (1.0 - transition) * 0.5 * h;
r2_soft = r2 + pow(mod_rs, 2.0);
mag = sigma * mp / pow(r2_soft, 1.5);
fx = -mag * dx;
fy = -mag * dy;
}
}
//if (dr < 1.0 * rs)
//{
// sink_rate = mass->rate * pow(1.0 - pow(dr / rs, 2.0), 2.0);
//}
real mdot = sigma * sink_rate * -1.0;
switch (mass->model) {
case AccelerationFree:
delta_cons[0] = dt * mdot;
delta_cons[1] = dt * mdot * prim[1] + dt * fx;
delta_cons[2] = dt * mdot * prim[2] + dt * fy;
delta_cons[3] = dt * (mdot * eps + 0.5 * mdot * (vx * vx + vy * vy)) + dt * (fx * vx + fy * vy);
break;
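// Torque-free sink: accrete mass carrying only the radial component of
// the gas velocity relative to the point mass (vxstar, vystar below),
// so the sink removes no angular momentum about the accretor.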
case TorqueFree: {
real vx = prim[1];
real vy = prim[2];
real vx0 = mass->vx;
real vy0 = mass->vy;
real rhatx = dx / (dr + 1e-12);
real rhaty = dy / (dr + 1e-12);
real dvdotrhat = (vx - vx0) * rhatx + (vy - vy0) * rhaty;
real vxstar = dvdotrhat * rhatx + vx0;
real vystar = dvdotrhat * rhaty + vy0;
delta_cons[0] = dt * mdot;
delta_cons[1] = dt * mdot * vxstar + dt * fx;
delta_cons[2] = dt * mdot * vystar + dt * fy;
delta_cons[3] = dt * (mdot * eps + 0.5 * mdot * (vxstar * vxstar + vystar * vystar)) + dt * (fx * vx + fy * vy);
break;
}
case ForceFree:
delta_cons[0] = dt * mdot;
delta_cons[1] = dt * fx;
delta_cons[2] = dt * fy;
delta_cons[3] = dt * (fx * vx + fy * vy);
break;
default:
delta_cons[0] = 0.0;
delta_cons[1] = 0.0;
delta_cons[2] = 0.0;
delta_cons[3] = 0.0;
break;
}
}
static __host__ __device__ void point_masses_source_term(
struct PointMassList *mass_list,
real x1,
real y1,
real dt,
real *prim,
real h,
real *cons,
int constant_softening)
{
for (int p = 0; p < mass_list->count; ++p)
{
real delta_cons[NCONS];
point_mass_source_term(&mass_list->masses[p], x1, y1, dt, prim, h, delta_cons, constant_softening);
for (int q = 0; q < NCONS; ++q)
{
cons[q] += delta_cons[q];
}
}
}
// ============================ EOS AND BUFFER ================================
// ============================================================================
static __host__ __device__ real sound_speed_squared(
struct EquationOfState *eos,
real *prim)
{
switch (eos->type)
{
case GammaLaw:
return prim[3] / prim[0] * GAMMA_LAW_INDEX;
default:
return 1.0; // WARNING: unhandled EOS type; falls back to unit sound speed
}
}
static __host__ __device__ void buffer_source_term(
struct BoundaryCondition *bc,
real xc,
real yc,
real dt,
real *cons)
{
switch (bc->type)
{
case Default:
case Inflow:
break;
case KeplerianBuffer:
{
real rc = sqrt(xc * xc + yc * yc);
real surface_density = bc->keplerian_buffer.surface_density;
real surface_pressure = bc->keplerian_buffer.surface_pressure;
real central_mass = bc->keplerian_buffer.central_mass;
real driving_rate = bc->keplerian_buffer.driving_rate;
real outer_radius = bc->keplerian_buffer.outer_radius;
real onset_width = bc->keplerian_buffer.onset_width;
real onset_radius = outer_radius - onset_width;
if (rc > onset_radius)
{
real pf = surface_density * sqrt(central_mass / rc);
real px = pf * (-yc / rc);
real py = pf * ( xc / rc);
real kinetic_energy = 0.5 * (px * px + py * py) / surface_density;
real energy = surface_pressure / (GAMMA_LAW_INDEX - 1.0) + kinetic_energy;
real u0[NCONS] = {surface_density, px, py, energy};
real omega_outer = sqrt(central_mass * pow(onset_radius, -3.0));
//real buffer_rate = driving_rate * omega_outer * max2(rc, 1.0);
real buffer_rate = driving_rate * omega_outer * (rc - onset_radius) / (outer_radius - onset_radius);
for (int q = 0; q < NCONS; ++q)
{
cons[q] -= (cons[q] - u0[q]) * buffer_rate * dt;
}
}
break;
}
}
}
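// Traceless rate-of-strain tensor built from the PLM gradients:
// gx[1] / dx approximates dvx/dx, gy[2] / dy approximates dvy/dy, and
// so on. The 4/3 and -2/3 coefficients subtract the trace (divergence)
// part from the diagonal components.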
static __host__ __device__ void shear_strain(const real *gx, const real *gy, real dx, real dy, real *s)
{
real sxx = 4.0 / 3.0 * gx[1] / dx - 2.0 / 3.0 * gy[2] / dy;
real syy =-2.0 / 3.0 * gx[1] / dx + 4.0 / 3.0 * gy[2] / dy;
real sxy = 1.0 / 1.0 * gx[2] / dx + 1.0 / 1.0 * gy[1] / dy;
real syx = sxy;
s[0] = sxx;
s[1] = sxy;
s[2] = syx;
s[3] = syy;
}
// ============================ HYDRO =========================================
// ============================================================================
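// The cooling update below is the closed-form solution of
// d(eps)/dt = -cooling_coefficient * eps^4 / sigma^2 over one step dt,
// floored afterwards so the Mach number cannot exceed mach_ceiling
// (using cs^2 = gamma * (gamma - 1) * eps for a gamma-law gas).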
static __host__ __device__ void cooling_term(
real cooling_coefficient,
real mach_ceiling,
real dt,
real *prim,
real *cons)
{
real gamma = GAMMA_LAW_INDEX;
real sigma = prim[0];
real eps = prim[3] / prim[0] / (gamma - 1.0);
real eps_cooled = eps * pow(1.0 + 3.0 * cooling_coefficient * pow(sigma, -2.0) * pow(eps, 3.0) * dt, -1.0 / 3.0);
real vx = prim[1];
real vy = prim[2];
real ek = 0.5 * (vx * vx + vy * vy);
eps_cooled = max2(eps_cooled, 2.0 * ek / gamma / (gamma - 1.0) * pow(mach_ceiling, -2.0));
cons[3] += sigma * (eps_cooled - eps);
}
static __host__ __device__ void conserved_to_primitive(
const real *cons,
real *prim,
real velocity_ceiling,
real density_floor,
real pressure_floor)
{
real gamma = GAMMA_LAW_INDEX;
real pres = max2(pressure_floor, (cons[3] - 0.5 * (cons[1] * cons[1] + cons[2] * cons[2]) / cons[0]) * (gamma - 1.0));
real vx = sign(cons[1]) * min2(fabs(cons[1] / cons[0]), velocity_ceiling);
real vy = sign(cons[2]) * min2(fabs(cons[2] / cons[0]), velocity_ceiling);
real rho = cons[0];
if (cons[0] < density_floor)
{
rho = density_floor;
vx = 0.0;
vy = 0.0;
pres = pressure_floor;
}
prim[0] = rho;
prim[1] = vx;
prim[2] = vy;
prim[3] = pres;
}
static __host__ __device__ void primitive_to_conserved(const real *prim, real *cons)
{
real gamma = GAMMA_LAW_INDEX;
real rho = prim[0];
real vx = prim[1];
real vy = prim[2];
real pres = prim[3];
real px = vx * rho;
real py = vy * rho;
real en = pres / (gamma - 1.0) + 0.5 * rho * (vx * vx + vy * vy);
cons[0] = rho;
cons[1] = px;
cons[2] = py;
cons[3] = en;
}
static __host__ __device__ real primitive_to_velocity(const real *prim, int direction)
{
switch (direction)
{
case 0: return prim[1];
case 1: return prim[2];
default: return 0.0;
}
}
static __host__ __device__ void primitive_to_flux(
const real *prim,
const real *cons,
real *flux,
int direction)
{
real vn = primitive_to_velocity(prim, direction);
real pressure = prim[3];
flux[0] = vn * cons[0];
flux[1] = vn * cons[1] + pressure * (direction == 0);
flux[2] = vn * cons[2] + pressure * (direction == 1);
flux[3] = vn * (cons[3] + pressure);
}
static __host__ __device__ void primitive_to_outer_wavespeeds(
const real *prim,
real *wavespeeds,
real cs2,
int direction)
{
real cs = sqrt(cs2);
real vn = primitive_to_velocity(prim, direction);
wavespeeds[0] = vn - cs;
wavespeeds[1] = vn + cs;
}
static __host__ __device__ real primitive_max_wavespeed(const real *prim, real cs2)
{
real cs = sqrt(cs2);
real vx = prim[1];
real vy = prim[2];
real ax = max2(fabs(vx - cs), fabs(vx + cs));
real ay = max2(fabs(vy - cs), fabs(vy + cs));
return max2(ax, ay);
}
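// Single-state HLLE flux:
// F = (ap * F_L - am * F_R + ap * am * (U_R - U_L)) / (ap - am),
// with am = min(0, ...) and ap = max(0, ...), so the expression reduces
// to pure upwinding when all wavespeeds share a sign.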
static __host__ __device__ void riemann_hlle(const real *pl, const real *pr, real *flux, real cs2, int direction)
{
real ul[NCONS];
real ur[NCONS];
real fl[NCONS];
real fr[NCONS];
real al[2];
real ar[2];
primitive_to_conserved(pl, ul);
primitive_to_conserved(pr, ur);
primitive_to_flux(pl, ul, fl, direction);
primitive_to_flux(pr, ur, fr, direction);
primitive_to_outer_wavespeeds(pl, al, cs2, direction);
primitive_to_outer_wavespeeds(pr, ar, cs2, direction);
const real am = min3(0.0, al[0], ar[0]);
const real ap = max3(0.0, al[1], ar[1]);
for (int q = 0; q < NCONS; ++q)
{
flux[q] = (fl[q] * ap - fr[q] * am - (ul[q] - ur[q]) * ap * am) / (ap - am);
}
}
// ============================ PATCH =========================================
// ============================================================================
#define FOR_EACH(p) \
for (int i = p.start[0]; i < p.start[0] + p.count[0]; ++i) \
for (int j = p.start[1]; j < p.start[1] + p.count[1]; ++j)
#define FOR_EACH_OMP(p) \
_Pragma("omp parallel for") \
for (int i = p.start[0]; i < p.start[0] + p.count[0]; ++i) \
for (int j = p.start[1]; j < p.start[1] + p.count[1]; ++j)
#define GET(p, i, j) (p.data + p.jumps[0] * ((i) - p.start[0]) + p.jumps[1] * ((j) - p.start[1]))
struct Patch
{
int start[2];
int count[2];
int jumps[2];
int num_fields;
real *data;
};
static struct Patch patch(struct Mesh mesh, int num_fields, int num_guard, real *data)
{
struct Patch patch;
patch.start[0] = -num_guard;
patch.start[1] = -num_guard;
patch.count[0] = mesh.ni + 2 * num_guard;
patch.count[1] = mesh.nj + 2 * num_guard;
patch.jumps[0] = num_fields * patch.count[1];
patch.jumps[1] = num_fields;
patch.num_fields = num_fields;
patch.data = data;
return patch;
}
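// Example (illustrative): with mesh.ni = mesh.nj = 64 and num_guard = 2,
// the patch spans i, j in [-2, 66) and GET(p, i, j) points at the
// num_fields contiguous values stored for zone (i, j).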
// ============================ SCHEME ========================================
// ============================================================================
static __host__ __device__ void primitive_to_conserved_zone(
struct Patch primitive,
struct Patch conserved,
int i,
int j)
{
real *p = GET(primitive, i, j);
real *u = GET(conserved, i, j);
primitive_to_conserved(p, u);
}
static __host__ __device__ void advance_rk_zone(
struct Mesh mesh,
struct Patch conserved_rk,
struct Patch primitive_rd,
struct Patch primitive_wr,
struct EquationOfState eos,
struct BoundaryCondition bc,
struct PointMassList mass_list,
real alpha,
real a,
real dt,
real velocity_ceiling,
real cooling_coefficient,
real mach_ceiling,
real density_floor,
real pressure_floor,
int constant_softening,
int i,
int j)
{
real dx = mesh.dx;
real dy = mesh.dy;
real xl = mesh.x0 + (i + 0.0) * dx;
real xc = mesh.x0 + (i + 0.5) * dx;
real xr = mesh.x0 + (i + 1.0) * dx;
real yl = mesh.y0 + (j + 0.0) * dy;
real yc = mesh.y0 + (j + 0.5) * dy;
real yr = mesh.y0 + (j + 1.0) * dy;
// ------------------------------------------------------------------------
// tj
//
// +-------+-------+-------+
// | | | |
// | lr | rj | rr |
// | | | |
// +-------+-------+-------+
// | | | |
// ki | li -|+ c -|+ ri | ti
// | | | |
// +-------+-------+-------+
// | | | |
// | ll | lj | rl |
// | | | |
// +-------+-------+-------+
//
// kj
// ------------------------------------------------------------------------
real *un = GET(conserved_rk, i, j);
real *pcc = GET(primitive_rd, i, j);
real *pli = GET(primitive_rd, i - 1, j);
real *pri = GET(primitive_rd, i + 1, j);
real *plj = GET(primitive_rd, i, j - 1);
real *prj = GET(primitive_rd, i, j + 1);
real *pki = GET(primitive_rd, i - 2, j);
real *pti = GET(primitive_rd, i + 2, j);
real *pkj = GET(primitive_rd, i, j - 2);
real *ptj = GET(primitive_rd, i, j + 2);
real *pll = GET(primitive_rd, i - 1, j - 1);
real *plr = GET(primitive_rd, i - 1, j + 1);
real *prl = GET(primitive_rd, i + 1, j - 1);
real *prr = GET(primitive_rd, i + 1, j + 1);
real plip[NCONS];
real plim[NCONS];
real prip[NCONS];
real prim[NCONS];
real pljp[NCONS];
real pljm[NCONS];
real prjp[NCONS];
real prjm[NCONS];
real gxli[NCONS];
real gxri[NCONS];
real gyli[NCONS];
real gyri[NCONS];
real gxlj[NCONS];
real gxrj[NCONS];
real gylj[NCONS];
real gyrj[NCONS];
real gxcc[NCONS];
real gycc[NCONS];
plm_gradient(pki, pli, pcc, gxli);
plm_gradient(pli, pcc, pri, gxcc);
plm_gradient(pcc, pri, pti, gxri);
plm_gradient(pkj, plj, pcc, gylj);
plm_gradient(plj, pcc, prj, gycc);
plm_gradient(pcc, prj, ptj, gyrj);
plm_gradient(pll, pli, plr, gyli);
plm_gradient(prl, pri, prr, gyri);
plm_gradient(pll, plj, prl, gxlj);
plm_gradient(plr, prj, prr, gxrj);
for (int q = 0; q < NCONS; ++q)
{
plim[q] = pli[q] + 0.5 * gxli[q];
plip[q] = pcc[q] - 0.5 * gxcc[q];
prim[q] = pcc[q] + 0.5 * gxcc[q];
prip[q] = pri[q] - 0.5 * gxri[q];
pljm[q] = plj[q] + 0.5 * gylj[q];
pljp[q] = pcc[q] - 0.5 * gycc[q];
prjm[q] = pcc[q] + 0.5 * gycc[q];
prjp[q] = prj[q] - 0.5 * gyrj[q];
}
real fli[NCONS];
real fri[NCONS];
real flj[NCONS];
real frj[NCONS];
real ucc[NCONS];
real cs2li = sound_speed_squared(&eos, pli);
real cs2ri = sound_speed_squared(&eos, pri);
real cs2lj = sound_speed_squared(&eos, plj);
real cs2rj = sound_speed_squared(&eos, prj);
riemann_hlle(plim, plip, fli, cs2li, 0);
riemann_hlle(prim, prip, fri, cs2ri, 0);
riemann_hlle(pljm, pljp, flj, cs2lj, 1);
riemann_hlle(prjm, prjp, frj, cs2rj, 1);
real sli[4];
real sri[4];
real slj[4];
real srj[4];
real scc[4];
shear_strain(gxli, gyli, dx, dy, sli);
shear_strain(gxri, gyri, dx, dy, sri);
shear_strain(gxlj, gylj, dx, dy, slj);
shear_strain(gxrj, gyrj, dx, dy, srj);
shear_strain(gxcc, gycc, dx, dy, scc);
real cs2cc = sound_speed_squared(&eos, pcc);
real hcc = disk_height(&mass_list, xc, yc, pcc);
real hli = disk_height(&mass_list, xl, yc, pli);
real hri = disk_height(&mass_list, xr, yc, pri);
real hlj = disk_height(&mass_list, xc, yl, plj);
real hrj = disk_height(&mass_list, xc, yr, prj);
real nucc = alpha * hcc * sqrt(cs2cc);
real nuli = alpha * hli * sqrt(cs2li);
real nuri = alpha * hri * sqrt(cs2ri);
real nulj = alpha * hlj * sqrt(cs2lj);
real nurj = alpha * hrj * sqrt(cs2rj);
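// Shakura-Sunyaev alpha-viscosity: nu = alpha * cs * H per zone. Each
// face's viscous stress rho * nu * s is the average over the two
// adjacent zones and is subtracted from the momentum and energy fluxes
// below.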
fli[1] -= 0.5 * (nuli * pli[0] * sli[0] + nucc * pcc[0] * scc[0]); // x-x
fli[2] -= 0.5 * (nuli * pli[0] * sli[1] + nucc * pcc[0] * scc[1]); // x-y
fri[1] -= 0.5 * (nucc * pcc[0] * scc[0] + nuri * pri[0] * sri[0]); // x-x
fri[2] -= 0.5 * (nucc * pcc[0] * scc[1] + nuri * pri[0] * sri[1]); // x-y
flj[1] -= 0.5 * (nulj * plj[0] * slj[2] + nucc * pcc[0] * scc[2]); // y-x
flj[2] -= 0.5 * (nulj * plj[0] * slj[3] + nucc * pcc[0] * scc[3]); // y-y
frj[1] -= 0.5 * (nucc * pcc[0] * scc[2] + nurj * prj[0] * srj[2]); // y-x
frj[2] -= 0.5 * (nucc * pcc[0] * scc[3] + nurj * prj[0] * srj[3]); // y-y
fli[3] -= 0.5 * (nuli * pli[0] * sli[0] * pli[1] + nucc * pcc[0] * scc[0] * pcc[1]); // v^x \tau^x_x
fri[3] -= 0.5 * (nucc * pcc[0] * scc[0] * pcc[1] + nuri * pri[0] * sri[0] * pri[1]);
fli[3] -= 0.5 * (nuli * pli[0] * sli[1] * pli[2] + nucc * pcc[0] * scc[1] * pcc[2]); // v^y \tau^x_y
fri[3] -= 0.5 * (nucc * pcc[0] * scc[1] * pcc[2] + nuri * pri[0] * sri[1] * pri[2]);
flj[3] -= 0.5 * (nulj * plj[0] * slj[2] * plj[1] + nucc * pcc[0] * scc[2] * pcc[1]); // v^x \tau^y_x
frj[3] -= 0.5 * (nucc * pcc[0] * scc[2] * pcc[1] + nurj * prj[0] * srj[2] * prj[1]);
flj[3] -= 0.5 * (nulj * plj[0] * slj[3] * plj[2] + nucc * pcc[0] * scc[3] * pcc[2]); // v^y \tau^y_y
frj[3] -= 0.5 * (nucc * pcc[0] * scc[3] * pcc[2] + nurj * prj[0] * srj[3] * prj[2]);
primitive_to_conserved(pcc, ucc);
buffer_source_term(&bc, xc, yc, dt, ucc);
point_masses_source_term(&mass_list, xc, yc, dt, pcc, hcc, ucc, constant_softening);
cooling_term(cooling_coefficient, mach_ceiling, dt, pcc, ucc);
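// Finite-volume flux update, then the Runge-Kutta convex blend
// u <- (1 - a) * u_new + a * u_rk against the cached stage state un.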
for (int q = 0; q < NCONS; ++q)
{
ucc[q] -= ((fri[q] - fli[q]) / dx + (frj[q] - flj[q]) / dy) * dt;
ucc[q] = (1.0 - a) * ucc[q] + a * un[q];
}
real *pout = GET(primitive_wr, i, j);
conserved_to_primitive(ucc, pout, velocity_ceiling, density_floor, pressure_floor);
}
static __host__ __device__ void advance_rk_zone_inviscid(
struct Mesh mesh,
struct Patch conserved_rk,
struct Patch primitive_rd,
struct Patch primitive_wr,
struct EquationOfState eos,
struct BoundaryCondition bc,
struct PointMassList mass_list,
real a,
real dt,
real velocity_ceiling,
real cooling_coefficient,
real mach_ceiling,
real density_floor,
real pressure_floor,
int constant_softening,
int i,
int j)
{
real dx = mesh.dx;
real dy = mesh.dy;
real xc = mesh.x0 + (i + 0.5) * dx;
real yc = mesh.y0 + (j + 0.5) * dy;
real *un = GET(conserved_rk, i, j);
real *pcc = GET(primitive_rd, i, j);
real *pli = GET(primitive_rd, i - 1, j);
real *pri = GET(primitive_rd, i + 1, j);
real *plj = GET(primitive_rd, i, j - 1);
real *prj = GET(primitive_rd, i, j + 1);
real *pki = GET(primitive_rd, i - 2, j);
real *pti = GET(primitive_rd, i + 2, j);
real *pkj = GET(primitive_rd, i, j - 2);
real *ptj = GET(primitive_rd, i, j + 2);
real plip[NCONS];
real plim[NCONS];
real prip[NCONS];
real prim[NCONS];
real pljp[NCONS];
real pljm[NCONS];
real prjp[NCONS];
real prjm[NCONS];
real gxli[NCONS];
real gxri[NCONS];
real gylj[NCONS];
real gyrj[NCONS];
real gxcc[NCONS];
real gycc[NCONS];
plm_gradient(pki, pli, pcc, gxli);
plm_gradient(pli, pcc, pri, gxcc);
plm_gradient(pcc, pri, pti, gxri);
plm_gradient(pkj, plj, pcc, gylj);
plm_gradient(plj, pcc, prj, gycc);
plm_gradient(pcc, prj, ptj, gyrj);
for (int q = 0; q < NCONS; ++q)
{
plim[q] = pli[q] + 0.5 * gxli[q];
plip[q] = pcc[q] - 0.5 * gxcc[q];
prim[q] = pcc[q] + 0.5 * gxcc[q];
prip[q] = pri[q] - 0.5 * gxri[q];
pljm[q] = plj[q] + 0.5 * gylj[q];
pljp[q] = pcc[q] - 0.5 * gycc[q];
prjm[q] = pcc[q] + 0.5 * gycc[q];
prjp[q] = prj[q] - 0.5 * gyrj[q];
}
real fli[NCONS];
real fri[NCONS];
real flj[NCONS];
real frj[NCONS];
real ucc[NCONS];
real cs2li = sound_speed_squared(&eos, pli);
real cs2ri = sound_speed_squared(&eos, pri);
real cs2lj = sound_speed_squared(&eos, plj);
real cs2rj = sound_speed_squared(&eos, prj);
riemann_hlle(plim, plip, fli, cs2li, 0);
riemann_hlle(prim, prip, fri, cs2ri, 0);
riemann_hlle(pljm, pljp, flj, cs2lj, 1);
riemann_hlle(prjm, prjp, frj, cs2rj, 1);
real h = disk_height(&mass_list, xc, yc, pcc);
primitive_to_conserved(pcc, ucc);
buffer_source_term(&bc, xc, yc, dt, ucc);
point_masses_source_term(&mass_list, xc, yc, dt, pcc, h, ucc, constant_softening);
cooling_term(cooling_coefficient, mach_ceiling, dt, pcc, ucc);
for (int q = 0; q < NCONS; ++q)
{
ucc[q] -= ((fri[q] - fli[q]) / dx + (frj[q] - flj[q]) / dy) * dt;
ucc[q] = (1.0 - a) * ucc[q] + a * un[q];
}
real *pout = GET(primitive_wr, i, j);
conserved_to_primitive(ucc, pout, velocity_ceiling, density_floor, pressure_floor);
}
static __host__ __device__ void point_mass_source_term_zone(
struct Mesh mesh,
struct Patch primitive,
struct Patch cons_rate,
struct PointMassList mass_list,
struct PointMass mass,
int constant_softening,
int i,
int j)
{
real *pc = GET(primitive, i, j);
real *sc = GET(cons_rate, i, j);
real x = mesh.x0 + (i + 0.5) * mesh.dx;
real y = mesh.y0 + (j + 0.5) * mesh.dy;
real h = disk_height(&mass_list, x, y, pc);
point_mass_source_term(&mass, x, y, 1.0, pc, h, sc, constant_softening);
}
static __host__ __device__ void wavespeed_zone(
struct EquationOfState eos,
struct Patch primitive,
struct Patch wavespeed,
int i,
int j)
{
real *pc = GET(primitive, i, j);
real cs2 = sound_speed_squared(&eos, pc);
real a = primitive_max_wavespeed(pc, cs2);
GET(wavespeed, i, j)[0] = a;
}
// ============================ KERNELS =======================================
// ============================================================================
#if defined(__NVCC__) || defined(__ROCM__)
static void __global__ primitive_to_conserved_kernel(
struct Mesh mesh,
struct Patch primitive,
struct Patch conserved)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < mesh.ni && j < mesh.nj)
{
primitive_to_conserved_zone(primitive, conserved, i, j);
}
}
static void __global__ advance_rk_kernel(
struct Mesh mesh,
struct Patch conserved_rk,
struct Patch primitive_rd,
struct Patch primitive_wr,
struct EquationOfState eos,
struct BoundaryCondition bc,
struct PointMassList mass_list,
real alpha,
real a,
real dt,
real velocity_ceiling,
real cooling_coefficient,
real mach_ceiling,
real density_floor,
real pressure_floor,
int constant_softening)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < mesh.ni && j < mesh.nj)
{
advance_rk_zone(
mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
alpha,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening,
i,
j
);
}
}
static void __global__ advance_rk_kernel_inviscid(
struct Mesh mesh,
struct Patch conserved_rk,
struct Patch primitive_rd,
struct Patch primitive_wr,
struct EquationOfState eos,
struct BoundaryCondition bc,
struct PointMassList mass_list,
real a,
real dt,
real velocity_ceiling,
real cooling_coefficient,
real mach_ceiling,
real density_floor,
real pressure_floor,
int constant_softening)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < mesh.ni && j < mesh.nj)
{
advance_rk_zone_inviscid(
mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening,
i,
j
);
}
}
static void __global__ point_mass_source_term_kernel(
struct Mesh mesh,
struct Patch primitive,
struct Patch cons_rate,
struct PointMassList mass_list,
struct PointMass mass,
int constant_softening)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < mesh.ni && j < mesh.nj)
{
point_mass_source_term_zone(mesh, primitive, cons_rate, mass_list, mass, constant_softening, i, j);
}
}
static void __global__ wavespeed_kernel(
struct Mesh mesh,
struct EquationOfState eos,
struct Patch primitive,
struct Patch wavespeed)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if (i < mesh.ni && j < mesh.nj)
{
wavespeed_zone(eos, primitive, wavespeed, i, j);
}
}
#endif
// ============================ PUBLIC API ====================================
// ============================================================================
/**
* Converts an array of primitive data to an array of conserved data. The
* array index space must follow the descriptions below.
* @param mesh The mesh [ni, nj]
* @param primitive_ptr[in] [-2, -2] [ni + 4, nj + 4] [4]
* @param conserved_ptr[out] [ 0, 0] [ni, nj] [4]
* @param mode The execution mode
*/
EXTERN_C void euler2d_primitive_to_conserved(
struct Mesh mesh,
real *primitive_ptr,
real *conserved_ptr,
enum ExecutionMode mode)
{
struct Patch primitive = patch(mesh, NCONS, 2, primitive_ptr);
struct Patch conserved = patch(mesh, NCONS, 0, conserved_ptr);
switch (mode) {
case CPU: {
FOR_EACH(conserved) {
primitive_to_conserved_zone(primitive, conserved, i, j);
}
break;
}
case OMP: {
#ifdef _OPENMP
FOR_EACH_OMP(conserved) {
primitive_to_conserved_zone(primitive, conserved, i, j);
}
#endif
break;
}
case GPU: {
#if defined(__NVCC__) || defined(__ROCM__)
dim3 bs = dim3(16, 16);
dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y);
primitive_to_conserved_kernel<<<bd, bs>>>(mesh, primitive, conserved);
#endif
break;
}
}
}
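/* Minimal caller sketch (illustrative; assumes struct Mesh carries ni,
 * nj, x0, y0, dx, dy as used above, and that primitives are filled with
 * two guard zones per side). */
#if 0
#include <stdlib.h>
static void example_primitive_to_conserved(struct Mesh mesh)
{
real *prim = (real *) calloc((size_t) (mesh.ni + 4) * (mesh.nj + 4) * NCONS, sizeof(real));
real *cons = (real *) calloc((size_t) mesh.ni * mesh.nj * NCONS, sizeof(real));
/* ... fill prim, including the guard zones ... */
euler2d_primitive_to_conserved(mesh, prim, cons, CPU);
free(prim);
free(cons);
}
#endif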
/**
* Updates an array of primitive data by advancing it a single Runge-Kutta
* step.
* @param mesh The mesh [ni, nj]
* @param conserved_rk_ptr[in] [ 0, 0] [ni, nj] [4]
* @param primitive_rd_ptr[in] [-2, -2] [ni + 4, nj + 4] [4]
* @param primitive_wr_ptr[out] [-2, -2] [ni + 4, nj + 4] [4]
* @param eos The EOS
* @param bc The boundary condition type
* @param mass_list A list of point mass objects
* @param alpha The alpha-viscosity parameter
* @param a The RK averaging parameter
* @param dt The time step
 * @param velocity_ceiling Maximum allowed velocity magnitude (safety)
 * @param cooling_coefficient Coefficient of the thermal cooling term
 * @param mach_ceiling Maximum allowed Mach number (safety)
 * @param density_floor Minimum allowed surface density (safety)
 * @param pressure_floor Minimum allowed pressure (safety)
* @param constant_softening Ignore local disk height (use softening radius only)
* @param mode The execution mode
*/
EXTERN_C void euler2d_advance_rk(
struct Mesh mesh,
real *conserved_rk_ptr,
real *primitive_rd_ptr,
real *primitive_wr_ptr,
struct EquationOfState eos,
struct BoundaryCondition bc,
struct PointMassList mass_list,
real alpha,
real a,
real dt,
real velocity_ceiling,
real cooling_coefficient,
real mach_ceiling,
real density_floor,
real pressure_floor,
int constant_softening,
enum ExecutionMode mode)
{
struct Patch conserved_rk = patch(mesh, NCONS, 0, conserved_rk_ptr);
struct Patch primitive_rd = patch(mesh, NCONS, 2, primitive_rd_ptr);
struct Patch primitive_wr = patch(mesh, NCONS, 2, primitive_wr_ptr);
switch (mode) {
case CPU: {
if (alpha == 0.0) {
FOR_EACH(conserved_rk) {
advance_rk_zone_inviscid(mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening,
i, j
);
}
} else {
FOR_EACH(conserved_rk) {
advance_rk_zone(mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
alpha,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening,
i, j
);
}
}
break;
}
case OMP: {
#ifdef _OPENMP
if (alpha == 0.0) {
FOR_EACH_OMP(conserved_rk) {
advance_rk_zone_inviscid(mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening,
i, j
);
}
} else {
FOR_EACH_OMP(conserved_rk) {
advance_rk_zone(mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
alpha,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening,
i, j
);
}
}
#endif
break;
}
case GPU: {
#if defined(__NVCC__) || defined(__ROCM__)
dim3 bs = dim3(16, 16);
dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y);
if (alpha == 0.0) {
advance_rk_kernel_inviscid<<<bd, bs>>>(
mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening
);
} else {
advance_rk_kernel<<<bd, bs>>>(
mesh,
conserved_rk,
primitive_rd,
primitive_wr,
eos,
bc,
mass_list,
alpha,
a,
dt,
velocity_ceiling,
cooling_coefficient,
mach_ceiling,
density_floor,
pressure_floor,
constant_softening
);
}
#endif
break;
}
}
}
/**
 * Fill a buffer with the source terms that would result from a single point
 * mass: the rate of change of each conserved field (surface density,
 * momentum, energy) in every zone. The density rate is negative for a
 * positive sink rate.
* @param mesh The mesh [ni, nj]
* @param primitive_ptr[in] [-2, -2] [ni + 4, nj + 4] [4]
 * @param cons_rate_ptr[out] [ 0, 0] [ni, nj] [4]
* @param mass_list A list of point mass objects
* @param mass A point mass
* @param mode The execution mode
* @param constant_softening Ignore local disk height (use softening radius only)
*/
EXTERN_C void euler2d_point_mass_source_term(
struct Mesh mesh,
real *primitive_ptr,
real *cons_rate_ptr,
struct PointMassList mass_list,
struct PointMass mass,
enum ExecutionMode mode,
int constant_softening)
{
struct Patch primitive = patch(mesh, NCONS, 2, primitive_ptr);
struct Patch cons_rate = patch(mesh, NCONS, 0, cons_rate_ptr);
switch (mode) {
case CPU: {
FOR_EACH(cons_rate) {
point_mass_source_term_zone(mesh, primitive, cons_rate, mass_list, mass, constant_softening, i, j);
}
break;
}
case OMP: {
#ifdef _OPENMP
FOR_EACH_OMP(cons_rate) {
point_mass_source_term_zone(mesh, primitive, cons_rate, mass_list, mass, constant_softening, i, j);
}
#endif
break;
}
case GPU: {
#if defined(__NVCC__) || defined(__ROCM__)
dim3 bs = dim3(16, 16);
dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y);
point_mass_source_term_kernel<<<bd, bs>>>(mesh, primitive, cons_rate, mass_list, mass, constant_softening);
#endif
break;
}
}
}
/**
* Fill a buffer with the maximum wavespeed in each zone.
* @param mesh The mesh [ni, nj]
* @param primitive_ptr[in] [-2, -2] [ni + 4, nj + 4] [4]
* @param wavespeed_ptr[out] [ 0, 0] [ni, nj] [1]
* @param eos The EOS
* @param mode The execution mode
*/
EXTERN_C void euler2d_wavespeed(
struct Mesh mesh,
real *primitive_ptr,
real *wavespeed_ptr,
struct EquationOfState eos,
enum ExecutionMode mode)
{
struct Patch primitive = patch(mesh, NCONS, 2, primitive_ptr);
struct Patch wavespeed = patch(mesh, 1, 0, wavespeed_ptr);
switch (mode) {
case CPU: {
FOR_EACH(wavespeed) {
wavespeed_zone(eos, primitive, wavespeed, i, j);
}
break;
}
case OMP: {
#ifdef _OPENMP
FOR_EACH_OMP(wavespeed) {
wavespeed_zone(eos, primitive, wavespeed, i, j);
}
#endif
break;
}
case GPU: {
#if defined(__NVCC__) || defined(__ROCM__)
dim3 bs = dim3(16, 16);
dim3 bd = dim3((mesh.nj + bs.x - 1) / bs.x, (mesh.ni + bs.y - 1) / bs.y);
wavespeed_kernel<<<bd, bs>>>(mesh, eos, primitive, wavespeed);
#endif
break;
}
}
}
/**
 * Obtain the maximum value in an array of doubles, using either a sequential
* or an OpenMP reduction. Not implemented for GPU execution.
*
* @param data The data [size]
* @param size The number of elements
* @param mode The execution mode
*/
EXTERN_C real euler2d_maximum(
real *data,
unsigned long size,
enum ExecutionMode mode)
{
real a_max = 0.0;
switch (mode) {
case CPU: {
for (unsigned long i = 0; i < size; ++i)
{
a_max = max2(a_max, data[i]);
}
break;
}
case OMP: {
#ifdef _OPENMP
#pragma omp parallel for reduction(max:a_max)
for (unsigned long i = 0; i < size; ++i)
{
a_max = max2(a_max, data[i]);
}
#endif
break;
}
case GPU: break; // Not implemented, use euler2d_wavespeed
// followed by a GPU reduction.
}
return a_max;
}
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
Mat kernel_tm(4 * 4, inch, outch, 2ul);
// G
const short ktm[4][3] = {
{2, 0, 0},
{1, 1, 1},
{1, -1, 1},
{0, 0, 2}
};
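// Winograd F(2,3) kernel transform U = G * g * G^T, where g is the 3x3
// kernel. The textbook G has entries {1, 1/2, -1/2, 0}; here it is
// scaled by 2 so the transform stays in integer arithmetic (the extra
// factor is presumably compensated downstream in the int8 pipeline).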
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 4; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
for (int r = 0; r < 4; r++)
{
Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, 2u);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16;
const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16;
const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16;
const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16;
const short* kernel4 = (const short*)kernel_tm + (p + 4) * inch * 16;
const short* kernel5 = (const short*)kernel_tm + (p + 5) * inch * 16;
const short* kernel6 = (const short*)kernel_tm + (p + 6) * inch * 16;
const short* kernel7 = (const short*)kernel_tm + (p + 7) * inch * 16;
short* ktmp = kernel_tm_test.channel(p / 8);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
kernel0 += 16;
kernel1 += 16;
kernel2 += 16;
kernel3 += 16;
kernel4 += 16;
kernel5 += 16;
kernel6 += 16;
kernel7 += 16;
}
}
for (; p + 3 < outch; p += 4)
{
const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16;
const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16;
const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16;
const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16;
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 16;
kernel1 += 16;
kernel2 += 16;
kernel3 += 16;
}
}
for (; p < outch; p++)
{
const short* kernel0 = (const short*)kernel_tm + p * inch * 16;
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 16;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
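// kernel_tm2 now holds 4 repacked Mats, one per 4-coefficient group, consumed by
// conv3x3s1_winograd23_int8_neon below.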
static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
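// bottom_blob_bordered is now (outw+2) x (outh+2) with the output rounded up to a
// multiple of 2, so every 4x4 input tile read at stride 2 stays in bounds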
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 4, 2u, opt.workspace_allocator);
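// bottom_blob_tm layout: channel (tiles * r + tileIdx) row q holds coefficient
// group r (4 of the 16 coefficients) of tile tileIdx for input channel q.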
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
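// which reduces per row (and again per column of the transposed result) to:
// w0 = d0 - d2, w1 = d1 + d2, w2 = d2 - d1, w3 = d3 - d1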
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// load
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v1.8b}, [%1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.8b}, [%2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v3.8b}, [%3] \n"
// w = B_t * d, widen int8 to int16
"ssubl v4.8h, v0.8b, v2.8b \n" // d4
"saddl v5.8h, v1.8b, v2.8b \n" // d6
"ssubl v6.8h, v2.8b, v1.8b \n" // d8
"ssubl v7.8h, v3.8b, v1.8b \n" // d10
// transpose w to w_t
"trn1 v8.4h, v4.4h, v5.4h \n"
"trn2 v9.4h, v4.4h, v5.4h \n"
"trn1 v10.4h, v6.4h, v7.4h \n"
"trn2 v11.4h, v6.4h, v7.4h \n"
"trn1 v0.2s, v8.2s, v10.2s \n"
"trn2 v2.2s, v8.2s, v10.2s \n"
"trn1 v1.2s, v9.2s, v11.2s \n"
"trn2 v3.2s, v9.2s, v11.2s \n"
// U = B_t * d_t
"sub v4.4h, v0.4h, v2.4h \n"
"add v5.4h, v1.4h, v2.4h \n"
"sub v6.4h, v2.4h, v1.4h \n"
"sub v7.4h, v3.4h, v1.4h \n"
// save
"st1 {v4.4h}, [%4] \n"
"st1 {v5.4h}, [%5] \n"
"st1 {v6.4h}, [%6] \n"
"st1 {v7.4h}, [%7] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(out_tm0), // %4
"=r"(out_tm1), // %5
"=r"(out_tm2), // %6
"=r"(out_tm3) // %7
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"4"(out_tm0),
"5"(out_tm1),
"6"(out_tm2),
"7"(out_tm3)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else
asm volatile(
// load
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"pld [%1, #64] \n"
"vld1.s8 {d1}, [%1] \n"
"pld [%2, #64] \n"
"vld1.s8 {d2}, [%2] \n"
"pld [%3, #64] \n"
"vld1.s8 {d3}, [%3] \n"
// w = B_t * d, widen int8 to int16
"vsubl.s8 q2, d0, d2 \n" // d4
"vaddl.s8 q3, d1, d2 \n" // d6
"vsubl.s8 q4, d2, d1 \n" // d8
"vsubl.s8 q5, d3, d1 \n" // d10
// transpose w to w_t
"vtrn.s16 d4, d6 \n"
"vtrn.s16 d8, d10 \n"
"vtrn.s32 d4, d8 \n"
"vtrn.s32 d6, d10 \n"
// U = B_t * d_t
"vsub.s16 d11, d4, d8 \n"
"vadd.s16 d12, d6, d8 \n"
"vsub.s16 d13, d8, d6 \n"
"vsub.s16 d14, d10, d6 \n"
// save
"vst1.s32 {d11}, [%4] \n"
"vst1.s32 {d12}, [%5] \n"
"vst1.s32 {d13}, [%6] \n"
"vst1.s32 {d14}, [%7] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(out_tm0), // %4
"=r"(out_tm1), // %5
"=r"(out_tm2), // %6
"=r"(out_tm3) // %7
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"4"(out_tm0),
"5"(out_tm1),
"6"(out_tm2),
"7"(out_tm3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#endif // __aarch64__
#else
short d0[4], d1[4], d2[4], d3[4];
short w0[4], w1[4], w2[4], w3[4];
short t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm1[n] = d1[n];
out_tm2[n] = d2[n];
out_tm3[n] = d3[n];
}
#endif
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
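// For each coefficient group r and each tile, accumulate over the input channels
// the 4-wide elementwise product of the transformed input with the repacked kernels
// (16-bit x 16-bit -> 32-bit multiply-accumulate), handling 8, then 4, then 1 output
// channels per pass; each tile row of top_blob_tm holds 16 int32 coefficients.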
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 4; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 16;
output1_tm += 16;
output2_tm += 16;
output3_tm += 16;
output4_tm += 16;
output5_tm += 16;
output6_tm += 16;
output7_tm += 16;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 16;
output1_tm += 16;
output2_tm += 16;
output3_tm += 16;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
//"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%1] \n"
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 4;
#if __ARM_NEON
int32x2_t _shift = vdup_n_s32(-2);
#endif
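// vdup_n_s32(-2): sshl/vshl by a negative count is an arithmetic shift right,
// here dividing each int32 result by 4 (the 2*2 scale of the integer kernel transform)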
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
int* outRow0 = top_blob_bordered.channel(p);
int* outRow1 = outRow0 + outw;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2;
"sub v1.4s, v1.4s, v2.4s \n"
"add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3;
"add v1.4s, v1.4s, v3.4s \n"
"trn1 v4.4s, v0.4s, v1.4s \n"
"trn2 v5.4s, v0.4s, v1.4s \n"
"dup v6.2d, v4.d[1] \n"
"dup v7.2d, v5.d[1] \n"
"add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2;
"sub v1.2s, v5.2s, v6.2s \n"
"add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3;
"add v1.2s, v1.2s, v7.2s \n"
"sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2
"sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2
"st1 {v0.2s}, [%1], #8 \n"
"st1 {v1.2s}, [%2], #8 \n"
: "=r"(out_tile), // %0
"=r"(outRow0), // %1
"=r"(outRow1) // %2
: "0"(out_tile),
"1"(outRow0),
"2"(outRow1),
"w"(_shift) // %6
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2;
"vsubq.s32 q1, q1, q2 \n"
"vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3;
"vaddq.s32 q1, q1, q3 \n"
"vtrn.s32 q0, q1 \n"
"vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2;
"vsub.s32 d9, d2, d1 \n"
"vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3;
"vadd.s32 d9, d9, d3 \n"
"vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2
"vshl.s32 d9, d9, %P6 \n" // o1 = o1 >> 2
"vst1.s32 {d8}, [%1]! \n"
"vst1.s32 {d9}, [%2]! \n"
: "=r"(out_tile), // %0
"=r"(outRow0), // %1
"=r"(outRow1) // %2
: "0"(out_tile),
"1"(outRow0),
"2"(outRow1),
"w"(_shift) // %6
: "cc", "memory", "q0", "q1", "q2", "q3", "q4");
#endif // __aarch64__
#else
int s0[4], s1[4], s2[4], s3[4];
int w0[4], w1[4];
int d0[2], d1[2], d2[2], d3[2];
int o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// save to top blob; shift right by 2 because the kernel transform used G' = 2 * G, so the result carries a factor of 4
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
out_tile += 16;
outRow0 += 2;
outRow1 += 2;
#endif // __ARM_NEON
}
outRow0 += outw;
outRow1 += outw;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
Mat kernel_tm(6 * 6, inch, outch, 2ul);
// G
// const float ktm[6][3] = {
// { 1.0f/4, 0.0f, 0.0f},
// { -1.0f/6, -1.0f/6, -1.0f/6},
// { -1.0f/6, 1.0f/6, -1.0f/6},
// { 1.0f/24, 1.0f/12, 1.0f/6},
// { 1.0f/24, -1.0f/12, 1.0f/6},
// { 0.0f, 0.0f, 1.0f}
// };
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
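// Integer G with per-row scales: rows 0-4 are the float rows above times 24, row 5
// times 6. U = G * k * G_t therefore carries a scale of 24*24 = 576 on most elements
// and 144 (or 36) on elements touching the last row/column; the output transform
// restores a uniform 576 with an extra factor of 4 on those terms before dividing.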
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// tmp = G * k (first half of the two-sided transform)
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
for (int r = 0; r < 9; r++)
{
Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, 2u);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const short* kernel0 = (const short*)kernel_tm.channel(p);
const short* kernel1 = (const short*)kernel_tm.channel(p + 1);
const short* kernel2 = (const short*)kernel_tm.channel(p + 2);
const short* kernel3 = (const short*)kernel_tm.channel(p + 3);
const short* kernel4 = (const short*)kernel_tm.channel(p + 4);
const short* kernel5 = (const short*)kernel_tm.channel(p + 5);
const short* kernel6 = (const short*)kernel_tm.channel(p + 6);
const short* kernel7 = (const short*)kernel_tm.channel(p + 7);
short* ktmp = kernel_tm_test.channel(p / 8);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
for (; p + 3 < outch; p += 4)
{
const short* kernel0 = (const short*)kernel_tm.channel(p);
const short* kernel1 = (const short*)kernel_tm.channel(p + 1);
const short* kernel2 = (const short*)kernel_tm.channel(p + 2);
const short* kernel3 = (const short*)kernel_tm.channel(p + 3);
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
for (; p < outch; p++)
{
const short* kernel0 = (const short*)kernel_tm.channel(p);
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
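// kernel_tm2 now holds 9 repacked Mats (one per 4-coefficient group of the 6x6
// transform), consumed by the winograd43 dot stages below.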
static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
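// bottom_blob_bordered is now (outw+2) x (outh+2) with the output rounded up to a
// multiple of 4, so every 6x6 input tile read at stride 4 stays in bounds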
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator);
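// bottom_blob_tm layout: the 36 coefficients of each 6x6 tile are split into 9
// groups of 4; channel (tiles * r + tileIdx) row q holds group r of tile tileIdx
// for input channel q.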
// BT
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q);
short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q);
short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q);
short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q);
short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = vld1_s8(r0);
_d1 = vld1_s8(r1);
_d2 = vld1_s8(r2);
_d3 = vld1_s8(r3);
_d4 = vld1_s8(r4);
_d5 = vld1_s8(r5);
int8x8_t _1_n = vdup_n_s8(-1);
int8x8_t _2_p = vdup_n_s8(2);
int8x8_t _2_n = vdup_n_s8(-2);
int8x8_t _4_p = vdup_n_s8(4);
int8x8_t _4_n = vdup_n_s8(-4);
int8x8_t _5_n = vdup_n_s8(-5);
int16x8_t _1_n_s16 = vdupq_n_s16(-1);
int16x8_t _2_p_s16 = vdupq_n_s16(2);
int16x8_t _2_n_s16 = vdupq_n_s16(-2);
int16x8_t _4_p_s16 = vdupq_n_s16(4);
int16x8_t _4_n_s16 = vdupq_n_s16(-4);
int16x8_t _5_n_s16 = vdupq_n_s16(-5);
// w = B_t * d
_w0 = vmull_s8(_d0, _4_p);
_w0 = vmlal_s8(_w0, _d2, _5_n);
_w0 = vaddw_s8(_w0, _d4);
_w1 = vmull_s8(_d1, _4_n);
_w1 = vmlal_s8(_w1, _d2, _4_n);
_w1 = vaddw_s8(_w1, _d3);
_w1 = vaddw_s8(_w1, _d4);
_w2 = vmull_s8(_d1, _4_p);
_w2 = vmlal_s8(_w2, _d2, _4_n);
_w2 = vmlal_s8(_w2, _d3, _1_n);
_w2 = vaddw_s8(_w2, _d4);
_w3 = vmull_s8(_d1, _2_n);
_w3 = vmlal_s8(_w3, _d2, _1_n);
_w3 = vmlal_s8(_w3, _d3, _2_p);
_w3 = vaddw_s8(_w3, _d4);
_w4 = vmull_s8(_d1, _2_p);
_w4 = vmlal_s8(_w4, _d2, _1_n);
_w4 = vmlal_s8(_w4, _d3, _2_n);
_w4 = vaddw_s8(_w4, _d4);
_w5 = vmull_s8(_d1, _4_p);
_w5 = vmlal_s8(_w5, _d3, _5_n);
_w5 = vaddw_s8(_w5, _d5);
// transpose d to d_t
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
// d = B_t * d_t
_n0 = vmulq_s16(_t0, _4_p_s16);
_n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
_n0 = vaddq_s16(_n0, _t4);
_n1 = vmulq_s16(_t1, _4_n_s16);
_n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
_n1 = vaddq_s16(_n1, _t3);
_n1 = vaddq_s16(_n1, _t4);
_n2 = vmulq_s16(_t1, _4_p_s16);
_n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
_n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
_n2 = vaddq_s16(_n2, _t4);
_n3 = vmulq_s16(_t1, _2_n_s16);
_n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
_n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
_n3 = vaddq_s16(_n3, _t4);
_n4 = vmulq_s16(_t1, _2_p_s16);
_n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
_n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
_n4 = vaddq_s16(_n4, _t4);
_n5 = vmulq_s16(_t1, _4_p_s16);
_n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
_n5 = vaddq_s16(_n5, _t5);
// save to out_tm
out_tm0[0] = _n0[0];
out_tm0[1] = _n0[1];
out_tm0[2] = _n0[2];
out_tm0[3] = _n0[3];
out_tm1[0] = _n0[4];
out_tm1[1] = _n0[5];
out_tm1[2] = _n1[0];
out_tm1[3] = _n1[1];
out_tm2[0] = _n1[2];
out_tm2[1] = _n1[3];
out_tm2[2] = _n1[4];
out_tm2[3] = _n1[5];
out_tm3[0] = _n2[0];
out_tm3[1] = _n2[1];
out_tm3[2] = _n2[2];
out_tm3[3] = _n2[3];
out_tm4[0] = _n2[4];
out_tm4[1] = _n2[5];
out_tm4[2] = _n3[0];
out_tm4[3] = _n3[1];
out_tm5[0] = _n3[2];
out_tm5[1] = _n3[3];
out_tm5[2] = _n3[4];
out_tm5[3] = _n3[5];
out_tm6[0] = _n4[0];
out_tm6[1] = _n4[1];
out_tm6[2] = _n4[2];
out_tm6[3] = _n4[3];
out_tm7[0] = _n4[4];
out_tm7[1] = _n4[5];
out_tm7[2] = _n5[0];
out_tm7[3] = _n5[1];
out_tm8[0] = _n5[2];
out_tm8[1] = _n5[3];
out_tm8[2] = _n5[4];
out_tm8[3] = _n5[5];
#else
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __ARM_NEON
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
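// Same dot stage as the F(2,3) version, but with 9 coefficient groups per tile;
// each tile row of top_blob_tm holds 36 int32 results.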
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0);
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else // __ARM_NEON
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __ARM_NEON
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// int* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
// const short* k0 = kernel0_tm.row<short>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
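// In this integer version the r05 term is applied as 4 * r05 (the 4 * s5 below):
// transform elements touching the last row/column were scaled by 144 instead of
// 576 by the kernel transform, and the factor of 4 restores a uniform 576 scale.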
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
int* outRow0 = top_blob_bordered.channel(p);
int* outRow1 = outRow0 + outw;
int* outRow2 = outRow0 + outw * 2;
int* outRow3 = outRow0 + outw * 3;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
int32x4_t _w0, _w3;
int32x2_t _w0n, _w3n;
int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
int32x4_t _o0, _o1, _o2, _o3;
// load
_s0 = vld1q_s32(out_tile);
_s0n = vld1_s32(out_tile + 4);
_s1 = vld1q_s32(out_tile + 6);
_s1n = vld1_s32(out_tile + 10);
_s2 = vld1q_s32(out_tile + 12);
_s2n = vld1_s32(out_tile + 16);
_s3 = vld1q_s32(out_tile + 18);
_s3n = vld1_s32(out_tile + 22);
_s4 = vld1q_s32(out_tile + 24);
_s4n = vld1_s32(out_tile + 28);
_s5 = vld1q_s32(out_tile + 30);
_s5n = vld1_s32(out_tile + 34);
// w = A_T * W
int32x2_t _tp0 = {1, 4};
int32x2_t _tp1 = {2, 8};
// 4*s5[n]
int32x4_t _s5x4 = vshlq_n_s32(_s5, 2);
int32x2_t _s5x4n = vshl_n_s32(_s5n, 2);
int32x4_t _t1p2 = vaddq_s32(_s1, _s2);
int32x2_t _t1p2n = vadd_s32(_s1n, _s2n);
int32x4_t _t3p4 = vaddq_s32(_s3, _s4);
int32x2_t _t3p4n = vadd_s32(_s3n, _s4n);
int32x4_t _t1s2 = vsubq_s32(_s1, _s2);
int32x2_t _t1s2n = vsub_s32(_s1n, _s2n);
int32x4_t _t3s4 = vsubq_s32(_s3, _s4);
int32x2_t _t3s4n = vsub_s32(_s3n, _s4n);
_w0 = vaddq_s32(_s0, _t1p2);
_w0n = vadd_s32(_s0n, _t1p2n);
_w0 = vaddq_s32(_w0, _t3p4);
_w0n = vadd_s32(_w0n, _t3p4n);
_w0n = vmul_s32(_w0n, _tp0);
// _w2,_w2n
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1);
_t1p2n = vmul_s32(_t1p2n, _tp0);
_w3 = vaddq_s32(_s5x4, _t1s2);
_w3n = vadd_s32(_s5x4n, _t1s2n);
_w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1);
_w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1);
_w3n = vmul_s32(_w3n, _tp0);
// _w1, _w1n
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
_t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0);
_t1s2n = vmul_s32(_t1s2n, _tp0);
int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n);
int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n);
// transpose w to w_t
#if __aarch64__
int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2);
int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2);
int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3);
int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3);
int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
_d0 = vreinterpretq_s32_s64(_dt0);
_d1 = vreinterpretq_s32_s64(_dt1);
_d2 = vreinterpretq_s32_s64(_dt2);
_d3 = vreinterpretq_s32_s64(_dt3);
_d4 = vtrn1q_s32(_w02n, _w13n);
_d5 = vtrn2q_s32(_w02n, _w13n);
#else
asm volatile(
"vtrn.32 %q[_w0], %q[_w1] \n"
"vtrn.32 %q[_w2], %q[_w3] \n"
"vswp %f[_w0], %e[_w2] \n"
"vswp %f[_w1], %e[_w3] \n"
"vtrn.32 %q[_w02n], %q[_w13n] \n"
: [_w0] "+w"(_w0),
[_w1] "+w"(_t1s2),
[_w2] "+w"(_t1p2),
[_w3] "+w"(_w3),
[_w02n] "+w"(_w02n),
[_w13n] "+w"(_w13n)
:
: "cc", "memory");
_d0 = _w0;
_d1 = _t1s2;
_d2 = _t1p2;
_d3 = _w3;
_d4 = _w02n;
_d5 = _w13n;
#endif
// Y = A_T * w_t
_t1p2 = vaddq_s32(_d1, _d2);
_t3p4 = vaddq_s32(_d3, _d4);
_t1s2 = vsubq_s32(_d1, _d2);
_t3s4 = vsubq_s32(_d3, _d4);
_o0 = vaddq_s32(_d0, _t1p2);
_o0 = vaddq_s32(_o0, _t3p4);
// _o2
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_o3 = vaddq_s32(_d5, _t1s2);
_o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1);
// _o1
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
// save to top blob tm
float32x4_t _ot0 = vcvtq_f32_s32(_o0);
float32x4_t _ot1 = vcvtq_f32_s32(_t1s2);
float32x4_t _ot2 = vcvtq_f32_s32(_t1p2);
float32x4_t _ot3 = vcvtq_f32_s32(_o3);
_ot0 = vmulq_n_f32(_ot0, 0.0017361112); // 0.0017361112 ~= 1/576 = 1/(24*24)
_ot1 = vmulq_n_f32(_ot1, 0.0017361112);
_ot2 = vmulq_n_f32(_ot2, 0.0017361112);
_ot3 = vmulq_n_f32(_ot3, 0.0017361112);
_o0 = vcvtq_s32_f32(_ot0);
_o1 = vcvtq_s32_f32(_ot1);
_o2 = vcvtq_s32_f32(_ot2);
_o3 = vcvtq_s32_f32(_ot3);
vst1q_s32(outRow0, _o0);
vst1q_s32(outRow1, _o1);
vst1q_s32(outRow2, _o2);
vst1q_s32(outRow3, _o3);
#else
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 5; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n];
}
for (int n = 5; n < 6; n++)
{
w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]);
w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]);
w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]);
w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]);
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
#endif // __ARM_NEON
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
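// A variant of conv3x3s1_winograd43_int8_neon which, judging from the extra _bias
// and per-channel scales_dequant parameters, fuses dequantization (scale + bias)
// into the output stage rather than writing raw int32 results.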
static void conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, std::vector<float> scales_dequant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // number of tile blocks along the height (the "block num" of FeatherCNN)
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator);
// BT
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q);
short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q);
short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q);
short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q);
short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = vld1_s8(r0);
_d1 = vld1_s8(r1);
_d2 = vld1_s8(r2);
_d3 = vld1_s8(r3);
_d4 = vld1_s8(r4);
_d5 = vld1_s8(r5);
int8x8_t _1_n = vdup_n_s8(-1);
int8x8_t _2_p = vdup_n_s8(2);
int8x8_t _2_n = vdup_n_s8(-2);
int8x8_t _4_p = vdup_n_s8(4);
int8x8_t _4_n = vdup_n_s8(-4);
int8x8_t _5_n = vdup_n_s8(-5);
int16x8_t _1_n_s16 = vdupq_n_s16(-1);
int16x8_t _2_p_s16 = vdupq_n_s16(2);
int16x8_t _2_n_s16 = vdupq_n_s16(-2);
int16x8_t _4_p_s16 = vdupq_n_s16(4);
int16x8_t _4_n_s16 = vdupq_n_s16(-4);
int16x8_t _5_n_s16 = vdupq_n_s16(-5);
// w = B_t * d
_w0 = vmull_s8(_d0, _4_p);
_w0 = vmlal_s8(_w0, _d2, _5_n);
_w0 = vaddw_s8(_w0, _d4);
_w1 = vmull_s8(_d1, _4_n);
_w1 = vmlal_s8(_w1, _d2, _4_n);
_w1 = vaddw_s8(_w1, _d3);
_w1 = vaddw_s8(_w1, _d4);
_w2 = vmull_s8(_d1, _4_p);
_w2 = vmlal_s8(_w2, _d2, _4_n);
_w2 = vmlal_s8(_w2, _d3, _1_n);
_w2 = vaddw_s8(_w2, _d4);
_w3 = vmull_s8(_d1, _2_n);
_w3 = vmlal_s8(_w3, _d2, _1_n);
_w3 = vmlal_s8(_w3, _d3, _2_p);
_w3 = vaddw_s8(_w3, _d4);
_w4 = vmull_s8(_d1, _2_p);
_w4 = vmlal_s8(_w4, _d2, _1_n);
_w4 = vmlal_s8(_w4, _d3, _2_n);
_w4 = vaddw_s8(_w4, _d4);
_w5 = vmull_s8(_d1, _4_p);
_w5 = vmlal_s8(_w5, _d3, _5_n);
_w5 = vaddw_s8(_w5, _d5);
// transpose d to d_t
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
// d = B_t * d_t
_n0 = vmulq_s16(_t0, _4_p_s16);
_n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
_n0 = vaddq_s16(_n0, _t4);
_n1 = vmulq_s16(_t1, _4_n_s16);
_n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
_n1 = vaddq_s16(_n1, _t3);
_n1 = vaddq_s16(_n1, _t4);
_n2 = vmulq_s16(_t1, _4_p_s16);
_n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
_n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
_n2 = vaddq_s16(_n2, _t4);
_n3 = vmulq_s16(_t1, _2_n_s16);
_n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
_n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
_n3 = vaddq_s16(_n3, _t4);
_n4 = vmulq_s16(_t1, _2_p_s16);
_n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
_n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
_n4 = vaddq_s16(_n4, _t4);
_n5 = vmulq_s16(_t1, _4_p_s16);
_n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
_n5 = vaddq_s16(_n5, _t5);
// save to out_tm
out_tm0[0] = _n0[0];
out_tm0[1] = _n0[1];
out_tm0[2] = _n0[2];
out_tm0[3] = _n0[3];
out_tm1[0] = _n0[4];
out_tm1[1] = _n0[5];
out_tm1[2] = _n1[0];
out_tm1[3] = _n1[1];
out_tm2[0] = _n1[2];
out_tm2[1] = _n1[3];
out_tm2[2] = _n1[4];
out_tm2[3] = _n1[5];
out_tm3[0] = _n2[0];
out_tm3[1] = _n2[1];
out_tm3[2] = _n2[2];
out_tm3[3] = _n2[3];
out_tm4[0] = _n2[4];
out_tm4[1] = _n2[5];
out_tm4[2] = _n3[0];
out_tm4[3] = _n3[1];
out_tm5[0] = _n3[2];
out_tm5[1] = _n3[3];
out_tm5[2] = _n3[4];
out_tm5[3] = _n3[5];
out_tm6[0] = _n4[0];
out_tm6[1] = _n4[1];
out_tm6[2] = _n4[2];
out_tm6[3] = _n4[3];
out_tm7[0] = _n4[4];
out_tm7[1] = _n4[5];
out_tm7[2] = _n5[0];
out_tm7[3] = _n5[1];
out_tm8[0] = _n5[2];
out_tm8[1] = _n5[3];
out_tm8[2] = _n5[4];
out_tm8[3] = _n5[5];
#else
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __ARM_NEON
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // likely the block count, as in FeatherCNN
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
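// layout: one channel per output channel, `tiles` rows of 36 ints; iteration
// r of the loop below fills coefficient group r (4 ints at offset r*4) of
// every tile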
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0);
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else // __ARM_NEON
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __ARM_NEON
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// int* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
// const short* k0 = kernel0_tm.row<short>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
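// as with the input transform, the 1-D A_t pass runs twice (rows, transpose,
// rows), i.e. Y = A_t * M * A per tile, before dequantizing back to float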
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // likely the block count, as in FeatherCNN
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_dequant0 = scales_dequant[p];
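// 576 folds out the integer scaling baked into the transforms (assumption:
// the kernel transform, which is not shown here, carries the matching
// 24 * 24 factor)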
const float scale0 = scale_dequant0 / 576.0f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
int32x4_t _w0, _w3;
int32x2_t _w0n, _w3n;
int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
int32x4_t _o0, _o3;
// load
_s0 = vld1q_s32(out_tile);
_s0n = vld1_s32(out_tile + 4);
_s1 = vld1q_s32(out_tile + 6);
_s1n = vld1_s32(out_tile + 10);
_s2 = vld1q_s32(out_tile + 12);
_s2n = vld1_s32(out_tile + 16);
_s3 = vld1q_s32(out_tile + 18);
_s3n = vld1_s32(out_tile + 22);
_s4 = vld1q_s32(out_tile + 24);
_s4n = vld1_s32(out_tile + 28);
_s5 = vld1q_s32(out_tile + 30);
_s5n = vld1_s32(out_tile + 34);
// w = A_T * W
int32x2_t _tp0 = {1, 4};
int32x2_t _tp1 = {2, 8};
// 4*s5[n]
int32x4_t _s5x4 = vshlq_n_s32(_s5, 2);
int32x2_t _s5x4n = vshl_n_s32(_s5n, 2);
int32x4_t _t1p2 = vaddq_s32(_s1, _s2);
int32x2_t _t1p2n = vadd_s32(_s1n, _s2n);
int32x4_t _t3p4 = vaddq_s32(_s3, _s4);
int32x2_t _t3p4n = vadd_s32(_s3n, _s4n);
int32x4_t _t1s2 = vsubq_s32(_s1, _s2);
int32x2_t _t1s2n = vsub_s32(_s1n, _s2n);
int32x4_t _t3s4 = vsubq_s32(_s3, _s4);
int32x2_t _t3s4n = vsub_s32(_s3n, _s4n);
_w0 = vaddq_s32(_s0, _t1p2);
_w0n = vadd_s32(_s0n, _t1p2n);
_w0 = vaddq_s32(_w0, _t3p4);
_w0n = vadd_s32(_w0n, _t3p4n);
_w0n = vmul_s32(_w0n, _tp0);
// _w2,_w2n
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1);
_t1p2n = vmul_s32(_t1p2n, _tp0);
_w3 = vaddq_s32(_s5x4, _t1s2);
_w3n = vadd_s32(_s5x4n, _t1s2n);
_w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1);
_w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1);
_w3n = vmul_s32(_w3n, _tp0);
// _w1, _w1n
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
_t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0);
_t1s2n = vmul_s32(_t1s2n, _tp0);
int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n);
int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n);
// transpose w to w_t
#if __aarch64__
int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2);
int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2);
int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3);
int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3);
int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
_d0 = vreinterpretq_s32_s64(_dt0);
_d1 = vreinterpretq_s32_s64(_dt1);
_d2 = vreinterpretq_s32_s64(_dt2);
_d3 = vreinterpretq_s32_s64(_dt3);
_d4 = vtrn1q_s32(_w02n, _w13n);
_d5 = vtrn2q_s32(_w02n, _w13n);
#else
asm volatile(
"vtrn.32 %q[_w0], %q[_w1] \n"
"vtrn.32 %q[_w2], %q[_w3] \n"
"vswp %f[_w0], %e[_w2] \n"
"vswp %f[_w1], %e[_w3] \n"
"vtrn.32 %q[_w02n], %q[_w13n] \n"
: [_w0] "+w"(_w0),
[_w1] "+w"(_t1s2),
[_w2] "+w"(_t1p2),
[_w3] "+w"(_w3),
[_w02n] "+w"(_w02n),
[_w13n] "+w"(_w13n)
:
: "cc", "memory");
_d0 = _w0;
_d1 = _t1s2;
_d2 = _t1p2;
_d3 = _w3;
_d4 = _w02n;
_d5 = _w13n;
#endif
// Y = A_T * w_t
_t1p2 = vaddq_s32(_d1, _d2);
_t3p4 = vaddq_s32(_d3, _d4);
_t1s2 = vsubq_s32(_d1, _d2);
_t3s4 = vsubq_s32(_d3, _d4);
_o0 = vaddq_s32(_d0, _t1p2);
_o0 = vaddq_s32(_o0, _t3p4);
// _o2
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_o3 = vaddq_s32(_d5, _t1s2);
_o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1);
// _o1
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
// dequantize and save to top_blob_bordered
float32x4_t _scale0 = vdupq_n_f32(scale0);
float32x4_t _out0_f32 = vdupq_n_f32(bias0);
float32x4_t _out1_f32 = vdupq_n_f32(bias0);
float32x4_t _out2_f32 = vdupq_n_f32(bias0);
float32x4_t _out3_f32 = vdupq_n_f32(bias0);
_out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0);
_out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_t1s2), _scale0);
_out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_t1p2), _scale0);
_out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0);
vst1q_f32(outRow0, _out0_f32);
vst1q_f32(outRow1, _out1_f32);
vst1q_f32(outRow2, _out2_f32);
vst1q_f32(outRow3, _out3_f32);
#else
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 5; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n];
}
for (int n = 5; n < 6; n++)
{
w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]);
w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]);
w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]);
w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]);
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// dequantize and save to top_blob_bordered
for (int n = 0; n < 4; n++)
{
outRow0[n] = (float)o0[n] * scale0 + bias0;
outRow1[n] = (float)o1[n] * scale0 + bias0;
outRow2[n] = (float)o2[n] * scale0 + bias0;
outRow3[n] = (float)o3[n] * scale0 + bias0;
}
#endif // __ARM_NEON
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
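// A minimal scalar reference convolution (a validation sketch; this helper is
// hypothetical and not part of the original file). It assumes the raw kernel
// layout consumed by the transform functions here: outch * inch consecutive
// 3x3 blocks of signed char. It produces the raw s32 accumulation, i.e. the
// value before bias/dequantization, for diffing against the paths above on
// small inputs.
static void conv3x3s1_int8_ref(const Mat& bottom_blob, Mat& top_blob, const signed char* kernel)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
out.fill(0);
for (int q = 0; q < inch; q++)
{
int* outptr = out;
const signed char* k0 = kernel + (p * inch + q) * 9;
const signed char* img = bottom_blob.channel(q);
for (int i = 0; i < outh; i++)
{
const signed char* r0 = img + w * i;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
for (int j = 0; j < outw; j++)
{
int sum = 0;
for (int k = 0; k < 3; k++)
{
sum += (int)r0[k] * k0[k];     // kernel row 0
sum += (int)r1[k] * k0[k + 3]; // kernel row 1
sum += (int)r2[k] * k0[k + 6]; // kernel row 2
}
*outptr++ += sum; // accumulate across input channels
r0++;
r1++;
r2++;
}
}
}
}
}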
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8, (size_t)1u);
const signed char* kernel = _kernel;
int p = 0;
for (; p + 7 < outch; p += 8)
{
const signed char* k0 = kernel + (p + 0) * inch * 9;
const signed char* k1 = kernel + (p + 1) * inch * 9;
const signed char* k2 = kernel + (p + 2) * inch * 9;
const signed char* k3 = kernel + (p + 3) * inch * 9;
const signed char* k4 = kernel + (p + 4) * inch * 9;
const signed char* k5 = kernel + (p + 5) * inch * 9;
const signed char* k6 = kernel + (p + 6) * inch * 9;
const signed char* k7 = kernel + (p + 7) * inch * 9;
signed char* ktmp = kernel_tm.channel(p / 8);
for (int q = 0; q < inch; q++)
{
for (int k = 0; k < 9; k++)
{
ktmp[0] = k0[k];
ktmp[1] = k1[k];
ktmp[2] = k2[k];
ktmp[3] = k3[k];
ktmp[4] = k4[k];
ktmp[5] = k5[k];
ktmp[6] = k6[k];
ktmp[7] = k7[k];
ktmp += 8;
}
k0 += 9;
k1 += 9;
k2 += 9;
k3 += 9;
k4 += 9;
k5 += 9;
k6 += 9;
k7 += 9;
}
}
for (; p < outch; p++)
{
const signed char* k0 = kernel + (p + 0) * inch * 9;
signed char* ktmp = kernel_tm.channel(p / 8 + p % 8);
for (int q = 0; q < inch; q++)
{
for (int k = 0; k < 9; k++)
{
ktmp[k] = k0[k];
}
ktmp += 9;
k0 += 9;
}
}
}
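// layout produced above for one packed-8 block, per input channel:
// ktmp[0..7]   = tap 0 of output channels p..p+7
// ktmp[8..15]  = tap 1 of output channels p..p+7
// ...
// ktmp[64..71] = tap 8 of output channels p..p+7
// i.e. 72 bytes per input channel, which is why the NEON loops below rewind
// ktmp with "sub ..., #72" after consuming all nine taps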
static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
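// stride 2: reading outw output pixels consumes 2 * outw input pixels, so
// tailstep (= 2 * w - 2 * outw) skips the unread tail of the current row
// plus one full input row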
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p + 0);
Mat out1 = top_blob.channel(p + 1);
Mat out2 = top_blob.channel(p + 2);
Mat out3 = top_blob.channel(p + 3);
Mat out4 = top_blob.channel(p + 4);
Mat out5 = top_blob.channel(p + 5);
Mat out6 = top_blob.channel(p + 6);
Mat out7 = top_blob.channel(p + 7);
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
out4.fill(0);
out5.fill(0);
out6.fill(0);
out7.fill(0);
const signed char* ktmp = _kernel.channel(p / 8);
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
int* outptr4 = out4;
int* outptr5 = out5;
int* outptr6 = out6;
int* outptr7 = out7;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%9], #16 \n" //r0-r2
"ld2 {v5.8b, v6.8b}, [%9] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n" //out0
"ld1 {v10.4s, v11.4s}, [%2] \n" //out1
"ld1 {v12.4s, v13.4s}, [%3] \n" //out2
"ld1 {v14.4s, v15.4s}, [%4] \n" //out3
"ld1 {v16.4s, v17.4s}, [%5] \n" //out4
"ld1 {v18.4s, v19.4s}, [%6] \n" //out5
"ld1 {v20.4s, v21.4s}, [%7] \n" //out6
"ld1 {v22.4s, v23.4s}, [%8] \n" //out7
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k00-k70)
"sshll v1.8h, v1.8b, #0 \n" //(k01-k71)
"sshll v2.8h, v2.8b, #0 \n" //(k02-k72)
"sshll v3.8h, v3.8b, #0 \n" // r0
"sshll v4.8h, v4.8b, #0 \n" // r1
"sshll v7.8h, v7.8b, #0 \n" // r2
// r0
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r00-r07)*k00
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r00-r07)*k10
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r00-r07)*k20
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r00-r07)*k30
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r00-r07)*k40
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r00-r07)*k50
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r00-r07)*k60
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r00-r07)*k70
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r1
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r10-r17)*k01
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r10-r17)*k11
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r10-r17)*k21
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r10-r17)*k31
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r10-r17)*k41
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r10-r17)*k51
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r10-r17)*k61
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r10-r17)*k71
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r2
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r20-r27)*k02
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r20-r27)*k12
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r20-r27)*k22
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r20-r27)*k32
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r20-r27)*k42
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r20-r27)*k52
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r20-r27)*k62
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r20-r27)*k72
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%10], #16 \n" //r3-r5
"ld2 {v5.8b, v6.8b}, [%10] \n"
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k03-k73)
"sshll v1.8h, v1.8b, #0 \n" //(k04-k74)
"sshll v2.8h, v2.8b, #0 \n" //(k05-k75)
"sshll v3.8h, v3.8b, #0 \n" // r3
"sshll v4.8h, v4.8b, #0 \n" // r4
"sshll v7.8h, v7.8b, #0 \n" // r5
// r3
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r30-r37)*k03
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r30-r37)*k13
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r30-r37)*k23
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r30-r37)*k33
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r30-r37)*k43
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r30-r37)*k53
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r30-r37)*k63
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r30-r37)*k73
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r4
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r40-r47)*k04
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r40-r47)*k14
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r40-r47)*k24
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r40-r47)*k34
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r40-r47)*k44
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r40-r47)*k54
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r40-r47)*k64
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r40-r47)*k74
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r5
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r50-r57)*k05
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r50-r57)*k15
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r50-r57)*k25
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r50-r57)*k35
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r50-r57)*k45
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r50-r57)*k55
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r50-r57)*k65
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r50-r57)*k75
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%11], #16 \n" //r6-r8
"ld2 {v5.8b, v6.8b}, [%11] \n"
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k06-k76)
"sshll v1.8h, v1.8b, #0 \n" //(k07-k77)
"sshll v2.8h, v2.8b, #0 \n" //(k08-k78)
"sshll v3.8h, v3.8b, #0 \n" // r6
"sshll v4.8h, v4.8b, #0 \n" // r7
"sshll v7.8h, v7.8b, #0 \n" // r8
// r6
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r60-r67)*k06
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r60-r67)*k16
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r60-r67)*k26
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r60-r67)*k36
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r60-r67)*k46
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r60-r67)*k56
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r60-r67)*k66
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r60-r67)*k76
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r7
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r70-r77)*k07
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r70-r77)*k17
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r70-r77)*k27
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r70-r77)*k37
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r70-r77)*k47
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r70-r77)*k57
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r70-r77)*k67
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r70-r77)*k77
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r8
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r80-r87)*k08
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r80-r87)*k18
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r80-r87)*k28
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r80-r87)*k38
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r80-r87)*k48
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r80-r87)*k58
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r80-r87)*k68
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r80-r87)*k78
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
"st1 {v16.4s, v17.4s}, [%5], #32 \n"
"st1 {v18.4s, v19.4s}, [%6], #32 \n"
"st1 {v20.4s, v21.4s}, [%7], #32 \n"
"st1 {v22.4s, v23.4s}, [%8], #32 \n"
"subs %w0, %w0, #1 \n"
"sub %12, %12, #72 \n" // reset ktmp
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
#else // __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #128] \n"
"vld1.s32 {d16-d17}, [%1] \n" // out0
"pld [%2, #128] \n"
"vld1.s32 {d18-d19}, [%2] \n" // out1
"pld [%3, #128] \n"
"vld1.s32 {d20-d21}, [%3] \n" // out2
"pld [%4, #128] \n"
"vld1.s32 {d22-d23}, [%4] \n" // out3
// r0
"pld [%9, #64] \n"
"vld2.s8 {d8-d9}, [%9] \n" // d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015)
"add %9, #8 \n"
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k00-k70) d1(k01-k71) d2(k02-k72)
"pld [%5, #128] \n"
"vld1.s32 {d24-d25}, [%5] \n" // out4
"pld [%6, #128] \n"
"vld1.s32 {d26-d27}, [%6] \n" // out5
"vmovl.s8 q2, d2 \n" // q2(k02-k72)
"vmovl.s8 q1, d1 \n" // q1(k01-k71)
"vmovl.s8 q0, d0 \n" // q0(k00-k70)
"vext.s8 d12, d8, d8, #1 \n" // d12(a02 a04 a06 a08 x x x x)
"pld [%7, #128] \n"
"vld1.s32 {d28-d29}, [%7] \n" // out6
"vmovl.s8 q5, d9 \n" // q5(a01 a03 a05 a07 a09 a011 a013 a015) d11
"vmovl.s8 q4, d8 \n" // q4(a00 a02 a04 a06 a08 a010 a012 a014) d9
"vmovl.s8 q6, d12 \n" // q6(a02 a04 a06 a08 a010 a012 a014 a016) d13
"pld [%8, #128] \n"
"vld1.s32 {d30-d31}, [%8] \n" // out7
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a00 a02 a04 a06) * k00
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a00 a02 a04 a06) * k10
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a00 a02 a04 a06) * k20
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a00 a02 a04 a06) * k30
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a00 a02 a04 a06) * k40
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a00 a02 a04 a06) * k50
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a00 a02 a04 a06) * k60
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a00 a02 a04 a06) * k70
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a01-a07) * k01
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a01-a07) * k11
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a01-a07) * k21
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a01-a07) * k31
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a01-a07) * k41
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a01-a07) * k51
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a01-a07) * k61
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a01-a07) * k71
"pld [%10, #64] \n"
"vld2.s8 {d8-d9}, [%10] \n" // d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115)
"add %10, #8 \n"
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a02-a08) * k02
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a02-a08) * k12
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a02-a08) * k22
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a02-a08) * k32
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k03-k73) d1(k04-k74) d2(k05-k75)
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a02-a08) * k42
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a02-a08) * k52
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a02-a08) * k62
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a02-a08) * k72
// r1
"vext.s8 d12, d8, d8, #1 \n" // d12(a12 a14 a16 a18 x x x x)
"vmovl.s8 q2, d2 \n" // q2(k05-k75)
"vmovl.s8 q1, d1 \n" // q1(k04-k74)
"vmovl.s8 q0, d0 \n" // q0(k03-k73)
"vmovl.s8 q5, d9 \n" // q5(a11-a115)
"vmovl.s8 q4, d8 \n" // q4(a10-a114)
"vmovl.s8 q6, d12 \n" // q6(a12-a116)
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a10-a16) * k03
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a10-a16) * k13
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a10-a16) * k23
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a10-a16) * k33
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a10-a16) * k43
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a10-a16) * k53
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a10-a16) * k63
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a10-a16) * k73
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a11-a17) * k04
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a11-a17) * k14
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a11-a17) * k24
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a11-a17) * k34
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a11-a17) * k44
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a11-a17) * k54
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a11-a17) * k64
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a11-a17) * k74
"pld [%11, #64] \n"
"vld2.s8 {d8-d9}, [%11] \n" // d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215)
"add %11, #8 \n"
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a12-a18) * k05
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a12-a18) * k15
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a12-a18) * k25
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a12-a18) * k35
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k06-k76) d1(k07-k77) d2(k08-k78)
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a12-a18) * k45
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a12-a18) * k55
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a12-a18) * k65
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a12-a18) * k75
// r2
"vext.s8 d12, d8, d8, #1 \n" // d12(a22 a24 a26 a28 x x x x)
"vmovl.s8 q2, d2 \n" // q2(k08-k78)
"vmovl.s8 q1, d1 \n" // q1(k07-k77)
"vmovl.s8 q0, d0 \n" // q0(k06-k76)
"vmovl.s8 q5, d9 \n" // q5(a21-a215)
"vmovl.s8 q4, d8 \n" // q4(a20-a214)
"vmovl.s8 q6, d12 \n" // q6(a22-a216)
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a20-a26) * k06
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a20-a26) * k16
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a20-a26) * k26
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a20-a26) * k36
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a20-a26) * k46
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a20-a26) * k56
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a20-a26) * k66
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a20-a26) * k76
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a21-a27) * k07
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a21-a27) * k17
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a21-a27) * k27
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a21-a27) * k37
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a21-a27) * k47
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a21-a27) * k57
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a21-a27) * k67
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a21-a27) * k77
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a22-a28) * k08
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a22-a28) * k18
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a22-a28) * k28
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a22-a28) * k38
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a22-a28) * k48
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a22-a28) * k58
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a22-a28) * k68
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a22-a28) * k78
// save s32 to memory
"sub %12, %12, #72 \n"
"vst1.s32 {d16-d17}, [%1]! \n" // out0
"vst1.s32 {d18-d19}, [%2]! \n" // out1
"vst1.s32 {d20-d21}, [%3]! \n" // out2
"vst1.s32 {d22-d23}, [%4]! \n" // out3
"subs %0, #1 \n"
"vst1.s32 {d24-d25}, [%5]! \n" // out4
"vst1.s32 {d26-d27}, [%6]! \n" // out5
"vst1.s32 {d28-d29}, [%7]! \n" // out6
"vst1.s32 {d30-d31}, [%8]! \n" // out7
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
#if __ARM_NEON
#if __aarch64__
int8x8_t _r0_s8 = vld1_s8(r0); // (a00 a01 a02 ....)
int8x8_t _r1_s8 = vld1_s8(r1); // (a10 a11 a12 ....)
int8x8_t _r2_s8 = vld1_s8(r2); // (a20 a21 a22 ....)
int16x8_t _r0 = vmovl_s8(_r0_s8);
int16x8_t _r1 = vmovl_s8(_r1_s8);
int16x8_t _r2 = vmovl_s8(_r2_s8);
int32x4_t _sum03 = {};
int32x4_t _sum47 = {};
_sum03 = vld1q_lane_s32(outptr0, _sum03, 0); // out0
_sum03 = vld1q_lane_s32(outptr1, _sum03, 1); // out1
_sum03 = vld1q_lane_s32(outptr2, _sum03, 2); // out2
_sum03 = vld1q_lane_s32(outptr3, _sum03, 3); // out3
_sum47 = vld1q_lane_s32(outptr4, _sum47, 0); // out4
_sum47 = vld1q_lane_s32(outptr5, _sum47, 1); // out5
_sum47 = vld1q_lane_s32(outptr6, _sum47, 2); // out6
_sum47 = vld1q_lane_s32(outptr7, _sum47, 3); // out7
// k0 - k2
int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70)
int8x8_t _k1_8 = vld1_s8(ktmp + 8); //(k01-k71)
int8x8_t _k2_8 = vld1_s8(ktmp + 16); //(k02-k72)
int16x8_t _k0 = vmovl_s8(_k0_8);
int16x8_t _k1 = vmovl_s8(_k1_8);
int16x8_t _k2 = vmovl_s8(_k2_8);
int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0);
int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0);
int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1);
int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2);
// k3 - k5
_k0_8 = vld1_s8(ktmp + 24); //(k03-k73)
_k1_8 = vld1_s8(ktmp + 32); //(k04-k74)
_k2_8 = vld1_s8(ktmp + 40); //(k05-k75)
_k0 = vmovl_s8(_k0_8);
_k1 = vmovl_s8(_k1_8);
_k2 = vmovl_s8(_k2_8);
_sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0);
_sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0);
_sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1);
_sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2);
// k6 - k8
_k0_8 = vld1_s8(ktmp + 48); //(k06-k76)
_k1_8 = vld1_s8(ktmp + 56); //(k07-k77)
_k2_8 = vld1_s8(ktmp + 64); //(k08-k78)
_k0 = vmovl_s8(_k0_8);
_k1 = vmovl_s8(_k1_8);
_k2 = vmovl_s8(_k2_8);
_sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0);
_sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0);
_sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1);
_sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2);
_sum0 = vaddq_s32(_sum0, _sum1);
_sum0n = vaddq_s32(_sum0n, _sum1n);
_sum03 = vaddq_s32(_sum03, _sum0);
_sum47 = vaddq_s32(_sum47, _sum0n);
vst1q_lane_s32(outptr0, _sum03, 0);
vst1q_lane_s32(outptr1, _sum03, 1);
vst1q_lane_s32(outptr2, _sum03, 2);
vst1q_lane_s32(outptr3, _sum03, 3);
vst1q_lane_s32(outptr4, _sum47, 0);
vst1q_lane_s32(outptr5, _sum47, 1);
vst1q_lane_s32(outptr6, _sum47, 2);
vst1q_lane_s32(outptr7, _sum47, 3);
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#else // __aarch64__
asm volatile(
"pld [%8, #64] \n"
"vld1.s8 {d0}, [%8] \n" // d0(a00 a01 a02 ....)
"pld [%9, #64] \n"
"vld1.s8 {d2}, [%9] \n" // d2(a10 a11 a12 ....)
"pld [%10, #64] \n"
"vld1.s8 {d4}, [%10] \n" // d4(a20 a21 a22 ....)
"pld [%11, #64] \n"
"vld1.s8 {d6-d8}, [%11]! \n" // d6(k00-k70) d7(k01-k71) d8(k02-k72)
"vmovl.s8 q0, d0 \n" // d0(a00 a01 a02 x)
"vmovl.s8 q1, d2 \n" // d2(a10 a11 a12 x)
"vmovl.s8 q2, d4 \n" // d4(a20 a21 a22 x)
"vmovl.s8 q5, d8 \n" // d10(k02-k32) d11(k42-k72)
"vmovl.s8 q4, d7 \n" // d8(k01-k31) d9(k41-k71)
"vmovl.s8 q3, d6 \n" // d6(k00-k30) d7(k40-k70)
"vld1.s32 {d20[0]}, [%0] \n" // out0 q10
"vld1.s32 {d20[1]}, [%1] \n" // out1
"vld1.s32 {d21[0]}, [%2] \n" // out2
"vld1.s32 {d21[1]}, [%3] \n" // out3
"pld [%11, #64] \n"
"vld1.s8 {d24-d26}, [%11]! \n"
"vmovl.s8 q14, d26 \n" // d28(k05-k35) d29(k45-k75)
"vmovl.s8 q13, d25 \n" // d26(k04-k34) d27(k44-k74)
"vmovl.s8 q12, d24 \n" // d24(k03-k33) d25(k43-k73)
"vld1.s32 {d22[0]}, [%4] \n" // out4 q11
"vld1.s32 {d22[1]}, [%5] \n" // out5
"vld1.s32 {d23[0]}, [%6] \n" // out6
"vld1.s32 {d23[1]}, [%7] \n" // out7
"vmull.s16 q6, d6, d0[0] \n" // a00 x (k00-k30)
"vmull.s16 q7, d7, d0[0] \n" // a00 x (k40-k70)
"vmull.s16 q8, d8, d0[1] \n" // a01 x (k01-k31)
"vmull.s16 q9, d9, d0[1] \n" // a01 x (k41-k71)
"vmlal.s16 q10, d10, d0[2] \n" // a02 x (k02-k32)
"vmlal.s16 q11, d11, d0[2] \n" // a02 x (k42-k72)
"pld [%11, #64] \n"
"vld1.s8 {d6-d8}, [%11]! \n"
"vmovl.s8 q5, d8 \n" // d10(k08-k38) d11(k48-k78)
"vmovl.s8 q4, d7 \n" // d8(k07-k37) d9(k47-k77)
"vmovl.s8 q3, d6 \n" // d6(k06-k36) d7(k46-k76)
"vmlal.s16 q6, d24, d2[0] \n" // a10 x (k03-k33)
"vmlal.s16 q7, d25, d2[0] \n" // a10 x (k43-k73)
"vmlal.s16 q8, d26, d2[1] \n" // a11 x (k04-k34)
"vmlal.s16 q9, d27, d2[1] \n" // a11 x (k44-k74)
"vmlal.s16 q10, d28, d2[2] \n" // a12 x (k05-k35)
"vmlal.s16 q11, d29, d2[2] \n" // a12 x (k45-k75)
"vmlal.s16 q6, d6, d4[0] \n" // a20 x (k06-k36)
"vmlal.s16 q7, d7, d4[0] \n" // a20 x (k46-k76)
"vmlal.s16 q8, d8, d4[1] \n" // a21 x (k07-k37)
"vmlal.s16 q9, d9, d4[1] \n" // a21 x (k47-k77)
"vmlal.s16 q10, d10, d4[2] \n" // a22 x (k08-k38)
"vmlal.s16 q11, d11, d4[2] \n" // a22 x (k48-k78)
"vadd.s32 q8, q8, q6 \n"
"vadd.s32 q9, q9, q7 \n"
"sub %11, %11, #72 \n"
"vadd.s32 q10, q10, q8 \n"
"vadd.s32 q11, q11, q9 \n"
"vst1.s32 {d20[0]}, [%0]! \n" // out0
"vst1.s32 {d20[1]}, [%1]! \n" // out1
"vst1.s32 {d21[0]}, [%2]! \n" // out2
"vst1.s32 {d21[1]}, [%3]! \n" // out3
"vst1.s32 {d22[0]}, [%4]! \n" // out4
"vst1.s32 {d22[1]}, [%5]! \n" // out5
"vst1.s32 {d23[0]}, [%6]! \n" // out6
"vst1.s32 {d23[1]}, [%7]! \n" // out7
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else // __ARM_NEON
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
sum0 += (int)r0[0] * ktmp[0];
sum1 += (int)r0[0] * ktmp[1];
sum2 += (int)r0[0] * ktmp[2];
sum3 += (int)r0[0] * ktmp[3];
sum4 += (int)r0[0] * ktmp[4];
sum5 += (int)r0[0] * ktmp[5];
sum6 += (int)r0[0] * ktmp[6];
sum7 += (int)r0[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r0[1] * ktmp[0];
sum1 += (int)r0[1] * ktmp[1];
sum2 += (int)r0[1] * ktmp[2];
sum3 += (int)r0[1] * ktmp[3];
sum4 += (int)r0[1] * ktmp[4];
sum5 += (int)r0[1] * ktmp[5];
sum6 += (int)r0[1] * ktmp[6];
sum7 += (int)r0[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r0[2] * ktmp[0];
sum1 += (int)r0[2] * ktmp[1];
sum2 += (int)r0[2] * ktmp[2];
sum3 += (int)r0[2] * ktmp[3];
sum4 += (int)r0[2] * ktmp[4];
sum5 += (int)r0[2] * ktmp[5];
sum6 += (int)r0[2] * ktmp[6];
sum7 += (int)r0[2] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[0] * ktmp[0];
sum1 += (int)r1[0] * ktmp[1];
sum2 += (int)r1[0] * ktmp[2];
sum3 += (int)r1[0] * ktmp[3];
sum4 += (int)r1[0] * ktmp[4];
sum5 += (int)r1[0] * ktmp[5];
sum6 += (int)r1[0] * ktmp[6];
sum7 += (int)r1[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[1] * ktmp[0];
sum1 += (int)r1[1] * ktmp[1];
sum2 += (int)r1[1] * ktmp[2];
sum3 += (int)r1[1] * ktmp[3];
sum4 += (int)r1[1] * ktmp[4];
sum5 += (int)r1[1] * ktmp[5];
sum6 += (int)r1[1] * ktmp[6];
sum7 += (int)r1[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[2] * ktmp[0];
sum1 += (int)r1[2] * ktmp[1];
sum2 += (int)r1[2] * ktmp[2];
sum3 += (int)r1[2] * ktmp[3];
sum4 += (int)r1[2] * ktmp[4];
sum5 += (int)r1[2] * ktmp[5];
sum6 += (int)r1[2] * ktmp[6];
sum7 += (int)r1[2] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[0] * ktmp[0];
sum1 += (int)r2[0] * ktmp[1];
sum2 += (int)r2[0] * ktmp[2];
sum3 += (int)r2[0] * ktmp[3];
sum4 += (int)r2[0] * ktmp[4];
sum5 += (int)r2[0] * ktmp[5];
sum6 += (int)r2[0] * ktmp[6];
sum7 += (int)r2[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[1] * ktmp[0];
sum1 += (int)r2[1] * ktmp[1];
sum2 += (int)r2[1] * ktmp[2];
sum3 += (int)r2[1] * ktmp[3];
sum4 += (int)r2[1] * ktmp[4];
sum5 += (int)r2[1] * ktmp[5];
sum6 += (int)r2[1] * ktmp[6];
sum7 += (int)r2[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[2] * ktmp[0];
sum1 += (int)r2[2] * ktmp[1];
sum2 += (int)r2[2] * ktmp[2];
sum3 += (int)r2[2] * ktmp[3];
sum4 += (int)r2[2] * ktmp[4];
sum5 += (int)r2[2] * ktmp[5];
sum6 += (int)r2[2] * ktmp[6];
sum7 += (int)r2[2] * ktmp[7];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
ktmp -= 8 * 9;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 8 * 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out = top_blob.channel(p);
out.fill(0);
const signed char* ktmp = _kernel.channel(p / 8 + p % 8);
for (int q = 0; q < inch; q++)
{
int* outptr = out;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v0.8b, v1.8b}, [%5] \n" //ktmp
"ld2 {v2.8b, v3.8b}, [%2], #16 \n" //r0-r2
"ld2 {v4.8b, v5.8b}, [%2] \n"
"ld2 {v6.8b, v7.8b}, [%3], #16 \n" //r3-r5
"ld2 {v8.8b, v9.8b}, [%3] \n"
"ld2 {v10.8b, v11.8b}, [%4], #16 \n" //r6-r8
"ld2 {v12.8b, v13.8b}, [%4] \n"
"ld1 {v14.4s, v15.4s}, [%1] \n" //out0
"ext v4.8b, v2.8b, v4.8b, #1 \n"
"ext v8.8b, v6.8b, v8.8b, #1 \n"
"ext v12.8b, v10.8b, v12.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k0-k7)
"sshll v1.8h, v1.8b, #0 \n" //(k8)
"sshll v2.8h, v2.8b, #0 \n" // r0
"sshll v3.8h, v3.8b, #0 \n" // r1
"sshll v4.8h, v4.8b, #0 \n" // r2
"sshll v6.8h, v6.8b, #0 \n" // r3
"sshll v7.8h, v7.8b, #0 \n" // r4
"sshll v8.8h, v8.8b, #0 \n" // r5
"sshll v10.8h, v10.8b, #0 \n" // r6
"sshll v11.8h, v11.8b, #0 \n" // r7
"sshll v12.8h, v12.8b, #0 \n" // r8
// r0
"smull v16.4s, v2.4h, v0.h[0] \n" // out = r0*k0
"smull2 v17.4s, v2.8h, v0.h[0] \n"
"smull v18.4s, v3.4h, v0.h[1] \n" // outn = r1*k1
"smull2 v19.4s, v3.8h, v0.h[1] \n"
"smlal v16.4s, v4.4h, v0.h[2] \n" // out = r2*k2
"smlal2 v17.4s, v4.8h, v0.h[2] \n"
"smlal v18.4s, v6.4h, v0.h[3] \n" // outn = r3*k3
"smlal2 v19.4s, v6.8h, v0.h[3] \n"
"smlal v16.4s, v7.4h, v0.h[4] \n" // out = r4*k4
"smlal2 v17.4s, v7.8h, v0.h[4] \n"
"smlal v18.4s, v8.4h, v0.h[5] \n" // outn = r5*k5
"smlal2 v19.4s, v8.8h, v0.h[5] \n"
"smlal v16.4s, v10.4h, v0.h[6] \n" // out = r6*k6
"smlal2 v17.4s, v10.8h, v0.h[6] \n"
"smlal v18.4s, v11.4h, v0.h[7] \n" // outn = r7*k7
"smlal2 v19.4s, v11.8h, v0.h[7] \n"
"smlal v16.4s, v12.4h, v1.h[0] \n" // out = r8*k8
"smlal2 v17.4s, v12.8h, v1.h[0] \n"
"add v8.4s, v16.4s, v18.4s \n"
"add v9.4s, v17.4s, v19.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(ktmp) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#else
if (nn > 0)
{
asm volatile(
"vld1.s8 {d0-d1}, [%5] \n" // d0(k0 - k7) d1(k8 ...)
"vmovl.s8 q1, d1 \n" // d2(k8 ...)
"vmovl.s8 q0, d0 \n" // d0(k0 - k3) d1(k4 - k7)
"0: \n"
"pld [%2, #192] \n"
"vld2.s8 {d4-d5}, [%2]! \n" // r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015)
"vld2.s8 {d8-d9}, [%2] \n" // d8(a016 ....)
"vld2.s8 {d10-d11}, [%3]! \n" // r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115)
"vld2.s8 {d14-d15}, [%3] \n" // d14(a116 ....)
"vld2.s8 {d16-d17}, [%4]! \n" // r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215)
"vld2.s8 {d20-d21}, [%4] \n" // d20(a216 ....)
"vld1.s32 {d22-d25}, [%1] \n" // q11(out0 - out3) q12(out4 - out7)
"vext.s8 d8, d4, d8, #1 \n" // d8(a02 a04 ... a016)
"vext.s8 d14, d10, d14, #1 \n" // d14(a12 a14 ... a116)
"vext.s8 d20, d16, d20, #1 \n" // d20(a22 a24 ... a216)
"vmovl.s8 q3, d5 \n" // q3(a01 a03 ... a015)
"vmovl.s8 q2, d4 \n" // q2(a00 a02 ... a014)
"vmovl.s8 q4, d8 \n" // q4(a02 a04 ... a016)
"vmovl.s8 q6, d11 \n" // q6(a11 a13 ... a115)
"vmovl.s8 q5, d10 \n" // q5(a10 a12 ... a114)
"vmovl.s8 q7, d14 \n" // q7(a12 a14 ... a116)
"vmovl.s8 q9, d17 \n" // q9(a21 a23 ... a215)
"vmovl.s8 q8, d16 \n" // q8(a20 a22 ... a214)
"vmovl.s8 q10, d20 \n" // q10(a22 a24 ... a216)
"vmlal.s16 q11, d4, d0[0] \n" // k0
"vmlal.s16 q12, d5, d0[0] \n"
"vmull.s16 q13, d6, d0[1] \n" // k1
"vmull.s16 q14, d7, d0[1] \n"
"vmlal.s16 q11, d8, d0[2] \n" // k2
"vmlal.s16 q12, d9, d0[2] \n"
"vmlal.s16 q13, d12, d1[0] \n" // k4
"vmlal.s16 q14, d13, d1[0] \n"
"vmlal.s16 q11, d10, d0[3] \n" // k3
"vmlal.s16 q12, d11, d0[3] \n"
"vmlal.s16 q13, d14, d1[1] \n" // k5
"vmlal.s16 q14, d15, d1[1] \n"
"vmlal.s16 q11, d16, d1[2] \n" // k6
"vmlal.s16 q12, d17, d1[2] \n"
"vmlal.s16 q13, d18, d1[3] \n" // k7
"vmlal.s16 q14, d19, d1[3] \n"
"vmlal.s16 q11, d20, d2[0] \n" // k8
"vmlal.s16 q12, d21, d2[0] \n"
"vadd.s32 q11, q11, q13 \n"
"vadd.s32 q12, q12, q14 \n"
"vst1.32 {d22-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(ktmp) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
if (remain > 0)
{
#if __ARM_NEON
int8x8_t _k01234567s8 = vld1_s8(ktmp);
int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp + 8);
int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3);
int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6);
int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8);
int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8);
int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8);
#endif
for (; remain > 0; remain--)
{
#if __ARM_NEON
int8x8_t _r00s8 = vld1_s8(r0);
int8x8_t _r10s8 = vld1_s8(r1);
int8x8_t _r20s8 = vld1_s8(r2);
int16x8_t _r00s16 = vmovl_s8(_r00s8);
int16x8_t _r10s16 = vmovl_s8(_r10s8);
int16x8_t _r20s16 = vmovl_s8(_r20s8);
int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16));
_sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16));
_sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16));
_sum = vsetq_lane_s32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_s32(_sum);
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
*outptr = vget_lane_s32(_ss, 0);
#endif // __aarch64__
#else
int sum = 0;
sum += (int)r0[0] * ktmp[0];
sum += (int)r0[1] * ktmp[1];
sum += (int)r0[2] * ktmp[2];
sum += (int)r1[0] * ktmp[3];
sum += (int)r1[1] * ktmp[4];
sum += (int)r1[2] * ktmp[5];
sum += (int)r2[0] * ktmp[6];
sum += (int)r2[1] * ktmp[7];
sum += (int)r2[2] * ktmp[8];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 9;
}
}
}
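/*
 * Added note: each iteration of the asm loops above produces 8 output
 * columns. ld2/vld2 de-interleave even and odd input columns, so the
 * stride-2 3x3 window reduces to three shifted int8 vectors per row (via
 * ext/vext), which are widened to 16 bits and multiplied against broadcast
 * kernel lanes into two pairs of 32-bit accumulators; the pairs are summed
 * and stored back with post-increment. The plain-C remainder loop above is
 * the scalar reference for the same computation.
 */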
|
sam_layer.c | #include "sam_layer.h"
#include "utils.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>
layer make_sam_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
fprintf(stderr,"scale Layer: %d\n", index);
layer l = { (LAYER_TYPE)0 };
l.type = SAM;
l.batch = batch;
l.w = w;
l.h = h;
l.c = c;
l.out_w = w2;
l.out_h = h2;
l.out_c = c2;
assert(l.out_c == l.c);
assert(l.w == l.out_w && l.h == l.out_h);
l.outputs = l.out_w*l.out_h*l.out_c;
l.inputs = l.outputs;
l.index = index;
l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));
l.forward = forward_sam_layer;
l.backward = backward_sam_layer;
#ifdef GPU
l.forward_gpu = forward_sam_layer_gpu;
l.backward_gpu = backward_sam_layer_gpu;
l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
return l;
}
void resize_sam_layer(layer *l, int w, int h)
{
l->out_w = w;
l->out_h = h;
l->outputs = l->out_w*l->out_h*l->out_c;
l->inputs = l->outputs;
l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float));
l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
cuda_free(l->output_gpu);
cuda_free(l->delta_gpu);
l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}
void forward_sam_layer(const layer l, network_state state)
{
int size = l.batch * l.out_c * l.out_w * l.out_h;
//int channel_size = 1;
float *from_output = state.net.layers[l.index].output;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
l.output[i] = state.input[i] * from_output[i];
}
activate_array(l.output, l.outputs*l.batch, l.activation);
}
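/*
 * Added note: with out = act(input .* from_output), the chain rule gives
 * d out / d input = act'(.) * from_output and d out / d from_output =
 * act'(.) * input. backward_sam_layer below computes exactly that: after
 * gradient_array(), l.delta holds act'(.) times the upstream gradient.
 */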
void backward_sam_layer(const layer l, network_state state)
{
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
//axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
//scale_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
int size = l.batch * l.out_c * l.out_w * l.out_h;
//int channel_size = 1;
float *from_output = state.net.layers[l.index].output;
float *from_delta = state.net.layers[l.index].delta;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
state.delta[i] += l.delta[i] * from_output[i]; // l.delta * from (should be divided by channel_size?)
from_delta[i] = state.input[i] * l.delta[i]; // input * l.delta
}
}
#ifdef GPU
void forward_sam_layer_gpu(const layer l, network_state state)
{
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = 1;
sam_gpu(state.net.layers[l.index].output_gpu, size, channel_size, state.input, l.output_gpu);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
void backward_sam_layer_gpu(const layer l, network_state state)
{
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = 1;
float *from_output = state.net.layers[l.index].output_gpu;
float *from_delta = state.net.layers[l.index].delta_gpu;
backward_sam_gpu(l.delta_gpu, size, channel_size, state.input, from_delta, from_output, state.delta);
}
#endif
|
GB_binop__second_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__second_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp32)
// A*D function (colscale): GB (_AxD__second_fp32)
// D*A function (rowscale): GB (_DxB__second_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB (_bind2nd__second_fp32)
// C=A'+scalar GB (_bind2nd_tran__second_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = bij
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP32 || GxB_NO_SECOND_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__second_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__second_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
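// Added note: the stray "; ;" in the loop above is the expansion of the
// empty GB_GETA macro defined earlier; SECOND never reads Ax, so aij is
// never materialized and Cx [p] is simply set to the scalar y.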
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB (_bind2nd_tran__second_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
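// Added note: since SECOND returns its second argument (z = y), e.g.
// eWiseMult with this operator yields cij = bij on the intersection of the
// patterns of A and B; A contributes only its sparsity structure.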
|
d2p.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <omp.h>
#include <unistd.h>
void print(int size, int const integers[])
{
for(int i = 0; i < size; i++)
printf("%d ", integers[i]);
printf("\n");
}
int init(int **in, int **out) {
int size;
if (fscanf(stdin, "%d", &size) != 1 || size <= 0) return 0; /* validate the element count */
int *tmp = (int *) malloc(sizeof(int)*2*size);
if (!tmp) return 0;
int i=0;
while (i < 2*size && fscanf(stdin, "%d", &tmp[i]) == 1) i++; /* bound the read to the allocation */
*in = tmp;
*out = (int *) malloc(sizeof(int)*2*size);
if (!*out) { free(tmp); return 0; } /* check the allocation itself, not the always-non-null argument */
return size;
}
void destroy(int *in, int *out)
{
free(in);
free(out);
}
void swap(int* a, int* b)
{
int backup = *a;
*a = *b;
*b = backup;
}
int max(int a, int b)
{
return (a > b ? a : b);
}
int binarySearch(
int element, int const in[],
int idxFrom, int idxTo
)
{
int low = idxFrom;
int high = max(idxFrom, idxTo + 1);
while(low < high)
{
int mid = (low + high) / 2;
if(element <= in[mid])
high = mid;
else
low = mid + 1;
}
return high;
}
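/*
 * Added note: binarySearch() is a lower-bound search; it returns the first
 * index in [idxFrom, idxTo + 1] whose value is >= element (idxTo + 1 when no
 * such element exists). Worked example: in = {1, 3, 3, 7}, element = 3,
 * idxFrom = 0, idxTo = 3 returns index 1.
 *
 * Added reference sketch (not called by the program; the name is ours): a
 * sequential merge with the same contract as the task-parallel merge() below.
 */
static void merge_seq_ref(int const in[], int s1, int e1, int s2, int e2,
                          int out[], int o)
{
    while (s1 <= e1 && s2 <= e2)
        out[o++] = (in[s1] <= in[s2]) ? in[s1++] : in[s2++];
    while (s1 <= e1) out[o++] = in[s1++];
    while (s2 <= e2) out[o++] = in[s2++];
}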
#ifdef GRAPH
int nextNode = 0;
void merge(
int const in[],
int idxStartArr1,
int idxEndArr1,
int idxStartArr2,
int idxEndArr2,
int out[],
int idxStartOut,
int nodeParent,
int node
)
#else
void merge(
int const in[],
int idxStartArr1,
int idxEndArr1,
int idxStartArr2,
int idxEndArr2,
int out[],
int idxStartOut
)
#endif
{
int sizeArr1 = idxEndArr1 - idxStartArr1 + 1;
int sizeArr2 = idxEndArr2 - idxStartArr2 + 1;
#ifdef GRAPH
fprintf(stderr, "\t%d [fillcolor=%d];\n",
node, omp_get_thread_num() + 1);
if(nodeParent >= 0)
fprintf(stderr, "\t%d -> %d;\n", nodeParent, node);
#endif
if(sizeArr1 < sizeArr2)
{
swap(&sizeArr1, &sizeArr2);
swap(&idxStartArr1, &idxStartArr2);
swap(&idxEndArr1, &idxEndArr2);
}
if(sizeArr1 > 0)
{
int idxPivotArr1 = (idxStartArr1 + idxEndArr1) / 2;
int idxPivotArr2 = binarySearch(
in[idxPivotArr1], in, idxStartArr2, idxEndArr2
);
int idxPivotOut =
idxStartOut +
(idxPivotArr1 - idxStartArr1) +
(idxPivotArr2 - idxStartArr2);
out[idxPivotOut] = in[idxPivotArr1];
#ifdef GRAPH
int myNextNode1, myNextNode2;
#pragma omp critical
{
myNextNode1 = ++nextNode;
myNextNode2 = ++nextNode;
}
#endif
#pragma omp task if(sizeArr1 > 10000 && sizeArr2 > 10000)
{
#ifdef GRAPH
merge(
in,
idxStartArr1, idxPivotArr1 - 1,
idxStartArr2, idxPivotArr2 - 1,
out, idxStartOut,
node, myNextNode1
);
#else
merge(
in,
idxStartArr1, idxPivotArr1 - 1,
idxStartArr2, idxPivotArr2 - 1,
out, idxStartOut
);
#endif
}
#pragma omp task if(sizeArr1 > 10000 && sizeArr2 > 10000)
{
#ifdef GRAPH
merge(
in,
idxPivotArr1 + 1, idxEndArr1,
idxPivotArr2, idxEndArr2,
out, idxPivotOut + 1,
node, myNextNode2
);
#else
merge(
in,
idxPivotArr1 + 1, idxEndArr1,
idxPivotArr2, idxEndArr2,
out, idxPivotOut + 1
);
#endif
}
}
}
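/*
 * Added note: merge() is the classic divide-and-conquer parallel merge: the
 * median of the larger run is taken as pivot, ranked in the smaller run via
 * binarySearch(), written to its final position, and the two independent
 * sub-merges run as OpenMP tasks. The if() clauses spawn tasks only when
 * both runs exceed 10000 elements, keeping task overhead below the work per
 * task.
 */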
void start_merge(
int const in[],
int idxStartArr1,
int idxEndArr1,
int idxStartArr2,
int idxEndArr2,
int out[],
int idxStartOut
)
{
/* omp_set_nested() is deprecated since OpenMP 5.0; omp_set_max_active_levels() is the modern equivalent */
omp_set_nested(1);
#pragma omp parallel
{
#pragma omp single nowait
{
merge(
in,
idxStartArr1,
idxEndArr1,
idxStartArr2,
idxEndArr2,
out,
#ifdef GRAPH
idxStartOut,
-1,
0
#else
idxStartOut
#endif
);
}
}
}
int main()
{
int size;
int *in, *out;
size = init(&in, &out);
if (!size) {
perror("Init failed");
return EXIT_FAILURE;
}
#ifdef GRAPH
fprintf(stderr, "digraph g {\n");
fprintf(stderr, "\tnode [colorscheme=rdylgn11 style=filled]\n");
#endif
double begin = omp_get_wtime();
start_merge(in, 0, size - 1, size, 2 * size - 1, out, 0);
double end = omp_get_wtime();
double duration = end - begin;
print(size * 2, out);
#ifdef GRAPH
fprintf(stderr, "}\n//");
#endif
fprintf(stderr, "Time: %lf seconds\n", duration);
destroy(in, out);
return EXIT_SUCCESS;
}
|
flushBarrierTaskWaitTaskYield.c | int main() {
int x = 10;
#pragma omp parallel
{
int localX = x;
#pragma omp flush // explicit flush: synchronize this thread's view of memory
localX = 10;
#pragma omp barrier // a barrier implies a flush on entry and on exit
localX = 10;
#pragma omp taskwait // taskwait is a task scheduling point with an implied flush (classic memory model)
localX = 10;
#pragma omp taskyield // taskyield only allows switching to another task; it synchronizes nothing
localX = 10;
}
x = 20;
return 0;
}
|
markstm.c | //** 2 functions called from i2.c **//
/* Move markers by using simple Runge-Kutta method */
void movemarkomp()
{
/* Vx, Vy buffer */
double dvxdx,dvxdy,dvydx,dvydy,celdx,celdy,vx0,vx1,vx2,vx3,vx4,vy0,vy1,vy2,vy3,vy4,ee0,ee1,ee2,ee3,ee4,sp0,sp1,sp2,sp3,sp4,pr0,pr1,pr2,pr3,pr4;
/* Water */
double vxwater,vywater;
long int mm1,marknum1,m10,m20,m30,m1,m2,m3;
/* Erosion-Sedimentation Y/N */
int n1;
int mm2;
/* Instability thresholds for immobile markers */
double xnonstab=0.50,ynonstab=0.60;
double dpdx,dpdy,e,n,vxkoef,vykoef,dx,dy;
double start;
/* Hydration front progress */
#if setup>9
start=omp_get_wtime();
/* dehydration */
if(vyfluid!=0 && timesum>1e+11) hydration2omp();
fprintf(fp_log,"\n Time taken for hydration = %e s \n",omp_get_wtime()-start);
#endif
/* Save number of markers */
marknum1=marknum;
/* Surface changes */
#if setup>9
start=omp_get_wtime();
if (timestep && erosmod) erosion();
fprintf(fp_log,"\n Time taken for erosion = %e s \n",omp_get_wtime()-start);
#endif
/* Move markers */
#pragma omp parallel for shared(markx,marky,markt,markim,markk,markxx,markxy,markd,markv,markp,markexx,markexy,markw,marke,marknum1,follow,nm,ystpy,m10_hr,m11_hr,marknum,outgrid,eroslev,vyfluid,vymelt,GXKOEF,GYKOEF,markmod,timestep,zdeep,tdeep,markht,xsize,ysize,xnumx,ynumy,gx,gy,pr,esp,exx,exy,vx,vy,markcp,markkt,markkf,markkp,markro,markbb,markaa,marknu,markn0,markn1,markll,marka0,marka1,markb0,markb1,markdh,markdv,markss,markmm,marks1,start_cond,stoksmod) \
private(mm1,mm2,m10,m20,m1,m2,m3,n1,vxwater,vywater,e,n,dpdx,dpdy,a,b,vxkoef,vykoef,vx0,vy0,sp0,ee0,pr0,vx1,vy1,sp1,ee1,pr1,vx2,vy2,sp2,ee2,pr2,vx3,vy3,sp3,ee3,pr3,vx4,vy4,sp4,ee4,pr4,dx,dy) \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1++)
{
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
if( ((markx[mm1]>=0 && marky[mm1]>=0 && (markx[mm1])<=xsize && (marky[mm1])<=ysize) || outgrid!=1) && !markim[mm2] )
{
// Search marker location within nodal grid
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
/**/
/* Erosion-Sedimentation */
if((marky[mm1])<=eroslev) n1=1; else n1=0;
/* Water marker move */
vxwater=vywater=0;
if(markt[mm1]>=50 && markt[mm1]<100)
{
/* Water velocity */
vywater=vyfluid; if(markd[mm1]>1100.0) vywater=vymelt;
/* Fluid in rock */
if(vyfluid>0 && (markk[mm1]==0 || markk[mm1]>298.0))
{
/* Horizontal,Vertical P-cell index */
m1=m10; if(markx[mm1]>(gx[m1]+gx[m1+1])/2.0) m1+=1;
if(m1<1) m1=1; if(m1>xnumx-2) m1=xnumx-2;
m2=m20; if(marky[mm1]>(gy[m2]+gy[m2+1])/2.0) m2+=1;
if(m2<1) m2=1; if(m2>ynumy-2) m2=ynumy-2;
/* Pressure gradients */
e=(markx[mm1]-(gx[m1-1]+gx[m1])/2.0)/((gx[m1+1]-gx[m1-1])/2.0);
n=(marky[mm1]-(gy[m2-1]+gy[m2])/2.0)/((gy[m2+1]-gy[m2-1])/2.0);
m3=m1*ynumy+m2;
dpdx=2.0*((1.0-n)*(pr[m3+ynumy]-pr[m3])+n*(pr[m3+ynumy+1]-pr[m3+1]))/(gx[m1+1]-gx[m1-1]);
dpdy=2.0*((1.0-e)*(pr[m3+1]-pr[m3])+e*(pr[m3+ynumy+1]-pr[m3+ynumy]))/(gy[m2+1]-gy[m2-1]);
/* Recalc velocity coefficients */
vxkoef=(1000.0*GXKOEF-dpdx)/(2300.0*9.81);
vykoef=(1000.0*GYKOEF-dpdy)/(2300.0*9.81);
if(vxkoef>2.0) vxkoef=2.0; if(vxkoef<-2.0) vxkoef=-2.0;
if(vykoef>2.0) vykoef=2.0; if(vykoef<-2.0) vykoef=-2.0;
/* Recalc velocity */
vxwater=vywater*vxkoef;
vywater*=vykoef;
}
else
/* Fluid in water */
{
vxwater=0;
vywater=-ABSV(vywater);
}
}
/* Motion Calc ///////////////////////////////// */
/* Vx, Vy, EpsII Simple calc */
if(markmod==1)
{
/* Interpolate velocity, pressure?, EE(eii), and ESP (spin) */
// These marker values are not stored; they are only used in this routine to move each marker privately.
allinteriomp(markx[mm1],marky[mm1],m10,m20,&vx0,&vy0,&pr0,&sp0,&ee0);
vx0+=vxwater; vy0+=vywater;
/**/
/* fprintf(fp_log,"SIMPLE %ld %d %e %e %e %e %e",mm1,markt[mm1],markx[mm1],marky[mm1],vx0,vy0,sp0); getchar(); */
}
/* Vx, Vy, EpsII 4 Runge-Kutta koef calc */
else
{
allinteriomp(markx[mm1],marky[mm1],m10,m20,&vx1,&vy1,&pr1,&sp1,&ee1);
vx1+=vxwater; vy1+=vywater;
/**/
//fprintf(fltest,"RK4 %ld %d %e %e %e %e %e %e \n",mm1,markt[mm1],markx[mm1],marky[mm1],vx1,vy1,sp1,ee1);
/**/
allinteriomp(markx[mm1]+vx1*timestep/2.0,marky[mm1]+vy1*timestep/2.0,m10,m20,&vx2,&vy2,&pr2,&sp2,&ee2);
vx2+=vxwater; vy2+=vywater;
/**/
allinteriomp(markx[mm1]+vx2*timestep/2.0,marky[mm1]+vy2*timestep/2.0,m10,m20,&vx3,&vy3,&pr3,&sp3,&ee3);
vx3+=vxwater; vy3+=vywater;
/**/
allinteriomp(markx[mm1]+vx3*timestep,marky[mm1]+vy3*timestep,m10,m20,&vx4,&vy4,&pr4,&sp4,&ee4);
vx4+=vxwater; vy4+=vywater;
/**/
/* Vx,Vy, EpsXX, EpsYY, EpsXY calc after Runge-Kutta */
vx0=(vx1+2.0*vx2+2.0*vx3+vx4)/6.0;
vy0=(vy1+2.0*vy2+2.0*vy3+vy4)/6.0;
if(markmod==2)
{
sp0=(sp1+2.0*sp2+2.0*sp3+sp4)/6.0;
ee0=(ee1+2.0*ee2+2.0*ee3+ee4)/6.0;
}
else
{
sp0=sp1;
ee0=ee1;
}
}
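/*
 * Added note: this is the classical 4th-order Runge-Kutta combination
 *   x(t+dt) = x(t) + dt*(v1 + 2*v2 + 2*v3 + v4)/6
 * applied in space along the instantaneous velocity field: v1 is sampled at
 * the marker position, v2 and v3 at half-step predictions, v4 at the
 * full-step prediction (the four allinteriomp() calls above).
 */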
/* Orthogonal motion only */
if (outgrid==2)
{
if(markx[mm1]<0 || (markx[mm1])>xsize) vy0=0;
if(marky[mm1]<0 || (marky[mm1])>ysize) vx0=0;
}
/* Normal markers */
if(markt[mm1]<100)
{
/* Markers coming from below the model */
if(marky[mm1]>zdeep && markk[mm1]<tdeep) markk[mm1]=tdeep;
// If you do not want to apply a large temperature lower boundary condition use:
//if(marky[mm1]>zdeep && vy0<0 && markk[mm1]<tdeep) markk[mm1]=tdeep;
/* Normal markers */
/* X,Y calc after Runge-Kutta */
markx[mm1]+=(timestep*vx0);
marky[mm1]+=(timestep*vy0);
if(marke[mm1]>0)
{
marke[mm1]+=(timestep*ee0);
}
sp0*=timestep;
/* Turcotte & Schubert, 1995 rotation formula */
if(stoksmod==1)
{
sp1=markxx[mm1]*cos(sp0)*cos(sp0)-markxx[mm1]*sin(sp0)*sin(sp0)+markxy[mm1]*sin(2.0*sp0);
sp3=0.5*(-markxx[mm1]-markxx[mm1])*sin(2.0*sp0)+markxy[mm1]*cos(2.0*sp0);
markxx[mm1]=sp1;
markxy[mm1]=sp3;
}
/* Jaumann corotation formula */
if(stoksmod==2)
{
sp1=markxx[mm1]+markxy[mm1]*2.0*sp0;
sp3=markxy[mm1]+0.5*(-markxx[mm1]-markxx[mm1])*2.0*sp0;
markxx[mm1]=sp1;
markxy[mm1]=sp3;
}
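/*
 * Added note: both branches rotate the deviatoric stress by the spin
 * increment sp0 assuming syy = -sxx; the Jaumann form is the small-angle
 * linearization (cos(2a) ~ 1, sin(2a) ~ 2a) of the finite rotation above.
 */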
/* Out of grid marker reset */
if(markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize)
{
markk[mm1]=0;
markd[mm1]=-1.0;
markw[mm1]=-1.0;
marke[mm1]=0;
}
}
/* Immobile markers */
else
{
/* X,Y calc after Runge-Kutta */
// Which velocity is used here if the marker was previously located outside the grid?
markx[mm1]+=(timestep*vx0);
marky[mm1]+=(timestep*vy0);
/* Check new position, add marker at end (marknum1) */
// Immobile markers that now enter grid
if(markx[mm1]>=0 && marky[mm1]>=0 && markx[mm1]<=xsize && marky[mm1]<=ysize)
{
#pragma omp critical(newmark)
{
#pragma omp flush(marknum1)
/* Type save */
markt[marknum1]=markt[mm1]-100;
/* X,Y calc after Runge-Kutta */
// Give marker new location (within grid)
markx[marknum1]=markx[mm1];
marky[marknum1]=marky[mm1];
/* Temperature Reset */
markk[marknum1]=0;
markd[marknum1]=-1.0;
markv[marknum1]=0;
/* Strain Reset */
marke[marknum1]=0;
/* Stress Reset */
markxx[marknum1]=0;
markxy[marknum1]=0;
/* Pressure Reset */
markp[marknum1]=0;
/* Strain rate Reset */
markexx[marknum1]=0;
markexy[marknum1]=0;
/* Increment additional-markers counter */
marknum1++;
/* X,Y reset for immobile marker */
markx[mm1]=markk[mm1];
marky[mm1]=markv[mm1];
// If the new marker is interesting for the picking algorithm, flag it to follow
// Note: this is hard-coded in i2.c as well. Fluid markers are excluded here, since immobile markers cannot become fluid
#if setup>9
// marknum1 was already incremented above, so the marker just added lives at index marknum1-1
if (start_cond==1 && marky[marknum1-1]<85e3 && markx[marknum1-1]>gx[m10_hr] && markx[marknum1-1]<gx[m11_hr] && markt[marknum1-1]>1 && markt[marknum1-1]<50)
{
follow[marknum1-1]=1;
// #pragma omp flush(nm)
nm++;
}
#endif
}
}
/* Check,Reset old position */
// Use markk and markv as dummies from above, so dx and dy are 0 if the marker was newly added
dx=markx[mm1]-markk[mm1];
dy=marky[mm1]-markv[mm1];
dy=pow(dx*dx+dy*dy,0.5);
/*
if(dy>ystpy || (marky[mm1]<0 && vy0<0) || (marky[mm1]>ysize && vy0>0) || (markx[mm1]<0 && vx0<0) || (markx[mm1]>xsize && vx0>0))
*/
// If moved by more than one cell, reset to the old position?
if(dy>ystpy)
{
/* X,Y reset for immobile marker */
markx[mm1]=markk[mm1];
marky[mm1]=markv[mm1];
}
}
/* End Motion Calc ///////////////////////////////// */
}
}
// End omp-section move markers
/* Mark num */
if(marknum1>MAXMRK) {fprintf(fp_log,"Out of space in markx[]"); fflush(fp_log); exit(0);}
/* Reset additional markers */
mm1=0;
while(marknum1>marknum && mm1<marknum)
{
/* Reload marker */
if((markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize) && markt[mm1]<100)
{
/* Decrease additional-markers counter */
marknum1--;
/* Type save */
markt[mm1]=markt[marknum1];
/* Temperature Reset */
markk[mm1]=0;
markd[mm1]=-1.0;
/* Strain Reset */
marke[mm1]=0;
/* Stress Reset */
markxx[mm1]=0;
markxy[mm1]=0;
/* Pressure Reset */
markp[mm1]=0;
/* Strain rate Reset */
markexx[mm1]=0;
markexy[mm1]=0;
/* X,Y reload */
markx[mm1]=markx[marknum1];
marky[mm1]=marky[marknum1];
}
/* Increase markers counter */
mm1++;
}
fprintf(fp_log,"\n Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1);fflush(fp_log);
/* Set new marker number */
marknum=marknum1;
/* Incr cycle of sedimentation */
sedimnum++;
}
/* End OMP move markers by using Simple/Runge-Kutta method */
/* ro[],nu[] recalc after marker positions */
void ronurecalcomp()
{
/* Counters */
long int m1,m2,m3,m10,m20;
int mm2,yn,mm3,n1,n2,ncount=0,nt,tid;
long int mm1;
double dx,dy,swt,swt1,celdx,celdy;
double wro,mnu,mgg,maa,mdro,msxxe,msxye,mexxe,mexye,mro,mcp,mkt,mht,mbb,mdi0,mdi1,mwa,dmwa,mxmelt,mhlatent;
double Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt;
// Here in this loop epsin is 2nd invariant of visco-plastic strainrate
double sigin,epsin;
/* TD Database variables, dTK,dPB - TK, PB step for tabulation in TD database */
double H0,H1,H2,H3,R0,R1,R2,R3,G0,G1,G2,G3,W0,W1,W2,W3,dTK=20.0,dPB=1000.0,n,e;
/* Phase transition variables */
double p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in;
/* RO, NU equations var */
double mpb=1.0,mtk=300.0,numax=0,numin=0;
double start,xwall,b1,b2,slope_wall,gelbeg;
start=omp_get_wtime();
Mgg=Mro=Mwa=Mcp=Mbb=Maa=Mdhh=Mkt=0;
if (printmod) fprintf(fp_log,"\n Number of nodes = %ld Number of markers = %ld \n",nodenum,marknum);
fflush(fp_log);
#pragma omp parallel
{nt=omp_get_num_threads();}
/* Layering on sediments */
m1=(long int)(sedimnum/sedimcyc);
m2=((long int)(m1/2))*2;
if(m2==m1) yn=3; else yn=4;
/* ADD MARKERS TO THE v-CELLS ========================== */
/* Clear ro[],nu[] wt */
for (m1=0;m1<nodenum;m1++)
{
ro0[m1]=0;
et0[m1]=0;
nu0[m1]=0;
nd0[m1]=0;
gg0[m1]=0;
gd0[m1]=0;
sxxe0[m1]=0;
sppe0[m1]=0;
sbritn0[m1]=0; // yield stress
sxye0[m1]=0;
exxe0[m1]=0;
exye0[m1]=0;
dro0[m1]=0;
drp0[m1]=0;
cp0[m1]=0;
kt0[m1]=0;
ht0[m1]=0;
tk0[m1]=0;
mrx0[m1]=0;
mry0[m1]=0;
mvx0[m1]=0;
mvy0[m1]=0;
sol0[m1]=0;
sol0[nodenum+m1]=0;
sol0[nodenum2+m1]=0;
sol1[m1]=0;
sol1[nodenum+m1]=0;
sol1[nodenum2+m1]=0;
}
#if setup>9
/* (1) Erosion-sedimentation and melting account for all markers */
#pragma omp parallel for shared(markx,marky,markk,markt,marke,markd,marknum,waterlev,erosmod,deserp,dyserp,xsize,ysize,gx,gy,xnumx,ynumy,ep,pr,timesum,res_high) \
private(mm1,mm2,m3,mtk,mpb,m10,m20,mxmelt,mhlatent) \
firstprivate(yn) \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1++)
{
/* Check markers out of grid */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markk[mm1]>0 && markt[mm1]<50)
{
/* Up Left Node X,Y Num */
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
m3=m10*ynumy+m20;
mm2=(int)markt[mm1];
/* Erosion/sedimentation account */
if(erosmod) erosmarkomp(mm1,yn,m10,markx[mm1],marky[mm1],markt,marke,markd);
/* Water/Air account */
if(markt[mm1]<2)
{
/* Change marker type */
if((marky[mm1])>waterlev) markt[mm1]=1; else markt[mm1]=0;
}
/* P, T parameters calc */
// 1e-5 since convert to bars for melting look-up?
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m10,m20);
mtk=markk[mm1];
// Remove initial weak zone for subduction initiation after X My
if(timesum>(20.0e6*3.15576e+7) && markt[mm1]==12)
{markt[mm1]=9;}
/* Serpentinization of brittle mantle faults at sub-surface */
if(markt[mm1]==9 && marke[mm1]>deserp && marky[mm1]<dyserp)
{
/* Mantle to Antigorite transformation */
markt[mm1]=13;
markd[mm1]=-1.0;
}
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m10,markt);
/* Rocks to rock+melt transformation */
// Note: passing markt hands the function the address of the array's first element, so it can modify the array there
if (meltmod) meltingomp(mtk,mpb,mm1,mm2,markt,marke,&mxmelt,&mhlatent);
}
}
// End OMP section erosion-sedimentation
// Open file for storing marker interface data
if (n0==1)
{
flfric = fopen(fileTxtOutputFric,"w");
fprintf(flfric," rocktype markx marky markpressure sbrit markxx markxy \n");
}
#endif
if (printmod==10000) fprintf(fp_log,"\n Time taken for erosmark/antigor/melting in ronurecalc = %e s \n",omp_get_wtime()-start);
start=omp_get_wtime();
/* (2) Add ro[] nu[] etc. using selected markers */
#pragma omp parallel shared(marknum,gridmod,markx,marky,markk,markt,erosmod,sedilev,markw,markd,eroslev, \
waterlev,zdeep,densimod,markht,marke,markexx,markexy,markxx,markxy,markp, \
nodenum,nodenum2,markvx,markvy,markv,markwa,markim,xsize,ysize,gx,gy,xnumx,ynumy,ep,pr,vx,vy,markf0,markf1,markbb,markaa,markro,markgg, \
markn0,markn1,marks0,marks1,marknu,markcp,markkt,markkf,markkp,nubeg,nuend,strmin,strmax,\
exy,esp,exx,pbmin,pbstp,pbnum,tkmin,tkstp,tknum,pbmin1,pbstp1,pbnum1,tkmin1,tkstp1,tknum1,timesum,td, \
zmpor,tkpor,markll,hidrl,hidry,lambfld,marka0,markb0,marke1,marka1,markb1,marke0,msbrit,msii_old, \
tk_updipsez0,tk_updipsez1,mgamma_vw,mgamma_vs,mvc_vs,mvc_vw,mus_vs,markdh,markdv,markss,markmm,timestepe,cyc0max,start_cond,veldepfric,stoksmod,res_high) \
private(mm1,mm2,tid,m10,m20,m1,m3,mpb,mtk,mro,mbb,maa,mcp,mkt,mht,mnu,mgg,mxmelt,mhlatent,mwa,wro,dmwa,mdi0,mdi1,mdro, \
celdx,celdy,swt,swt1,dx,dy,msxxe,msxye,mexxe,mexye,sigin,epsin,Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt,p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in) \
firstprivate(yn)
{
// Initialize temporary interpolation arrays (capitalized) to zero (declared inside the parallel region, so each thread gets a private copy)
double* Nu0 = (double*) calloc(nodenum,sizeof(double));
double* Nd0 = (double*) calloc(nodenum,sizeof(double));
double* Gg0 = (double*) calloc(nodenum,sizeof(double));
double* Gd0 = (double*) calloc(nodenum,sizeof(double));
double* Ro0 = (double*) calloc(nodenum,sizeof(double));
double* Sxxe0 = (double*) calloc(nodenum,sizeof(double));
double* Sppe0 = (double*) calloc(nodenum,sizeof(double));
double* Sbritn0 = (double*) calloc(nodenum,sizeof(double));
double* Sxye0 = (double*) calloc(nodenum,sizeof(double));
double* Exxe0 = (double*) calloc(nodenum,sizeof(double));
double* Exye0 = (double*) calloc(nodenum,sizeof(double));
double* Et0 = (double*) calloc(nodenum,sizeof(double));
double* Dro0 = (double*) calloc(nodenum,sizeof(double));
double* Drp0 = (double*) calloc(nodenum,sizeof(double));
double* Cp0 = (double*) calloc(nodenum,sizeof(double));
double* Kt0 = (double*) calloc(nodenum,sizeof(double));
double* Ht0 = (double*) calloc(nodenum,sizeof(double));
double* Tk = (double*) calloc(nodenum,sizeof(double));
double* Mrx0 = (double*) calloc(nodenum,sizeof(double));
double* Mry0 = (double*) calloc(nodenum,sizeof(double));
double* Mvx0 = (double*) calloc(nodenum,sizeof(double));
double* Mvy0 = (double*) calloc(nodenum,sizeof(double));
double* Sol0 = (double*) calloc(nodenum*3,sizeof(double));
double* Sol1 = (double*) calloc(nodenum*3,sizeof(double));
#pragma omp for \
schedule(runtime)
for (mm1=0;mm1<marknum;mm1+=gridmod)
{
/* Check markers out of grid */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markk[mm1]>0 && markt[mm1]<50)
{
tid=omp_get_thread_num();
m10=m1serch(markx[mm1]);
m20=m2serch(marky[mm1]);
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
#if setup>9
/* 1a. --- Remove water, rocks --- */
if(erosmod==0)
{
if(marky[mm1]>sedilev && mm2<2)
{
mm2=yn; markt[mm1]=yn;
markw[mm1]=0;
markd[mm1]=-1.0;
}
if(marky[mm1]<eroslev && mm2>1)
{
if((marky[mm1])>waterlev) markt[mm1]=1; else markt[mm1]=0;
mm2=markt[mm1];
markw[mm1]=0;
markd[mm1]=-1.0;
}
}
/* 1b. Remove Plumes */
if(marky[mm1]>zdeep && mm2!=10)
{
mm2=10; markt[mm1]=10;
markw[mm1]=0;
markd[mm1]=-1.0;
}
#endif
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m10,m20);
mtk=(markk[mm1]);
/* Reset water/air temperature */
// if (mm2<2) mtk=markk[mm1]=273.0;
/* 2.-3. --- Calculate density --- */
// & passes the address of the variables defined at the start of this routine; this is efficient and lets the subroutine change the variables in place
#if setup>9
dencalcomp(mtk,mpb,markx[mm1],marky[mm1],mm2,&mro,&mbb,&maa);
mcp=markcp[mm2];
mkt=(markkt[mm2]+markkf[mm2]/(mtk+77.0))*exp(markkp[mm2]*mpb);
/* ========================= */
/* Mantle phase transitions */
/* ========================= */
if (densimod==1)
{
/*
if(mm2>=9 && mm2<=14 && markex[mm1]>0) mro*=1.0-0.04*markex[mm1];
*/
/* Eclogitization, St, Pv transitions in oceanic crust */
if(mm2==7 || mm2==8 )
{
/* Eclogitization Ito and Kennedy, 1971 */
/*basalt=>garnet granulite (Ga-In) transition*/
p_ga_in=-9222.0+mtk*14.0;
/*Not to have granulites at pressure lower than 2 kbar*/
if(p_ga_in<2000.0) p_ga_in=2000.0;
/*garnet granulite=>eclogite (Pl-Out) transition*/
p_pl_out=-1460.0+mtk*20.0;
/*Not to have eclogites at pressure lower than 12 kbar*/
if(p_pl_out<12000.0) p_pl_out=12000.0;
if(mpb>p_ga_in)
{
rokf=0;
if(mtk>teclmin)
{
if(mtk>teclmax)
{
rokf=0.16;
}
else
{
rokf=0.16*(mtk-teclmin)/(teclmax-teclmin);
}
}
if(mpb>=p_pl_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_ga_in)/(p_pl_out-p_ga_in));
}
}
/* Coe->St transition Gerya et al., 2004, PCM */
p_st_in=59100.0+mtk*22.6;
if(mpb>p_st_in) mro*=1.06;
/* Pv transition, Mishin et al., 2008 with slope from Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=354000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=352000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.08;
if(mpb>=p_sp_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Ol-Sp and Pv transitions in the mantle */
if(mm2>=9 && mm2<=14)
{
/* Ol-Sp transition, Katsura & Ito, 1989 */
/* Ol-out transition*/
p_ol_out=91000.0+mtk*27.0;
/* Sp-in transition*/
p_sp_in=66000.0+mtk*39.0;
/*Limit width of Sp-Ol transition to 2 kbar */
if(p_sp_in>p_ol_out-2000.0) p_sp_in=p_ol_out-2000.0;
if(mpb>p_sp_in)
{
rokf=0.06;
if(mpb>=p_ol_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_sp_in)/(p_ol_out-p_sp_in));
}
}
/* Pv transition, Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=304000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=302000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.11;
if(mpb>=p_sp_out)
{
mro*=1.0+rokf;
}
else
{
mro*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
}
/* ========================== end */
/* Test Heat conductivity k=ko/(1+b*(T-To)/To) */
if (markkt[mm2]<0) mkt=-markkt[mm2]/(1.0+markkf[mm2]*(mtk-markkp[mm2])/markkp[mm2]);
mht=markht[mm2];
/* 4. Molten rocks */
// Note: Calls viscalc only for melted rocks here !
if (mm2>20) { meltpartomp(mtk,mpb,markx[mm1],marky[mm1],mm1,mm2,&mro,&mbb,&maa,&mnu,&mcp,&mkt,&mgg,&mxmelt,&mhlatent); }
/* ~X Thermodynamic database use for water */
// ATAT QUESTION TARAS: I would suggest removing this if-statement, since it is hardly used. Do you agree? What was it included to avoid?
/* Density && Water wt% save */
// Hardly used: a rock-type number larger than air or water, but with negative density, at the very start of the model.
if ( densimod==3 && mm2>1 && (timesum<=1e+11 || markd[mm1]<=0) )
{
// tdbasecalc(mtk,mpb,mm2,mm1);
// markw[mm1]=eps[42];
// markd[mm1]=mro;
// ATATOMP QUESTION TARAS: written this way, mro comes from dencalcomp or meltpartomp, not from the tdbasecalc above that stored it as eps; is that what you wanted?
// Use capital letters since Taras does not always assign the eps value in this loop for m.. (e.g. mkt in the next densimod2 call a few lines below)
// ATATOMP QUESTION TARAS: Is this intentional? With what purpose? E.g. for mkt in the next densimod2 call a few lines below
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m10,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
markw[mm1]=Mwa;
markd[mm1]=mro;
}
#elif setup<10
// In lab have constant density per rocktype and no thermal evolution, so much faster
mro=markro[mm2];
#endif
/* ---> (3) Marker rheology: calculate viscosity and stresses <--- */
mdi0=0;
mdi1=1.0;
#if setup>9
if(mm2<=20)
#endif
{
// yn = 1 now means that plasticity IS executed in this call to viscalc! This is the only successful such call within the current code for normal, non-melting rocks!
viscalcomp(mtk,mpb,markx[mm1],marky[mm1],markv[mm1],markwa[mm1],markk[mm1],markp[mm1],markt[mm1],markexx[mm1],markexy[mm1],markxx,markxy,marke,mm1,mm2,1,m10,&mnu,&mdi0);
/* XXX Density correction for the dilation angle XXX = not executed */
if(markf0[mm2]>0 && markf1[mm2]>0 && marke[mm1]>0)
{
/* Second invariant of viscoplastic strain calc, check */
sigin=pow(markxx[mm1]*markxx[mm1]+markxy[mm1]*markxy[mm1],0.5);
epsin=marke[mm1]-sigin/2.0/markgg[mm2];
if(epsin>markf1[mm2]) epsin=markf1[mm2];
if(epsin>0) mdi1=exp(-2.0*epsin*markf0[mm2]);
}
}
msxxe=markxx[mm1];
msxye=markxy[mm1];
mexxe=markexx[mm1];
mexye=markexy[mm1];
mgg=markgg[mm2];
/* Min,Max NU limitation */
if(mnu<nubeg) mnu=nubeg; if(mnu>nuend) mnu=nuend;
/* Water/Air account */
#if setup>9
if(mm2<2)
{
markd[mm1]=mro;
mdi0=0;
mdi1=1.0;
}
#endif
/* End Water/Air account */
/* Calc log density derivative, save new density */
if(markd[mm1]<=0 || densimod==0)
{markd[mm1]=mro; mdi0=0;}
mdro=0;
maa=0;
mdi1=1.0;
#if setup>9
if(timestepe)
{
mdro=mro/markd[mm1];
mdro=log(mdro)-mdi0;
//if(epsin>0 && debugmod) {fprintf(fp_log,"d %ld %d %e %e %e %e %e %e %e %e %e %e",mm1,mm2,markx[mm1],marky[mm1],marke[mm1]*2.0*markgg[mm2],sigin,marke[mm1],epsin,-2.0*epsin*markf0[mm2],markd[mm1],mro,mdro);getchar();}
}
#endif
/* Save new density */
mdro=-mdi0;
markd[mm1]=mro;
/* Correct new density for dilation */
mro*=mdi1;
/* Saving marker viscosity */
markv[mm1]=mnu;
// if(debugmod) {fprintf(fp_log,"num=%ld type=%d x=%e y=%e mpb=%e mtk=%e nu=%e ro=%e cp=%e kt=%e ht=%e",mm1,mm2,markx[mm1],marky[mm1],mpb,mtk,mnu,mro,mcp,mkt,mht);getchar()};
/* --> (4) Interpolation from markers to 4 corners of the cell ====================================*/
/* Marker weight calculation using dimension of current Cell */
celdx=gx[m10+1]-gx[m10];
celdy=gy[m20+1]-gy[m20];
swt1=1.0/celdx/celdy;
/* Normalized local marker coordinates within the current cell */
celdx=(markx[mm1]-gx[m10])/(gx[m10+1]-gx[m10]);
celdy=(marky[mm1]-gy[m20])/(gy[m20+1]-gy[m20]);
if (celdx<0 || celdy<0 || celdx>1.0 ||celdy>1.0) {fprintf(fp_log," WARNING !!! num=%ld type=%d x=%e y=%e celdx=%e celdy=%e",mm1,mm2,markx[mm1],marky[mm1],celdx,celdy); fflush(fp_log); getchar();}
/* --- Interpolate ro,nu etc to nodes using interpolation coefficients --- */
for (m1=0;m1<4;m1++)
{
/* Marker weight calculation using dimension of current Cell */
/* Different corners */
/* 0 2 */
/* 1 3 */
switch(m1)
{
case 0:
/* Calc node number */
m3=m10*ynumy+m20;
/* Add shear viscosity Nu */
if (celdx<0.5 && celdy<0.5)
{
dx=1.0-2.0*celdx;
dy=1.0-2.0*celdy;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vx and Mx from markers */
if (celdx<0.5)
{
dx=1.0-celdx;
dy=1.0-ABSV(celdy-0.5);
swt=swt1*dx*dy;
Mvx0[m3]+=markvx[mm1]*mro*swt;
Mrx0[m3]+=mro*swt;
Sol0[nodenum2+m3]+=swt;
}
/* Add Vy and My from markers */
if (celdy<0.5)
{
dx=1.0-ABSV(celdx-0.5);
dy=1.0-celdy;
swt=swt1*dx*dy;
Mvy0[m3]+=markvy[mm1]*mro*swt;
Mry0[m3]+=mro*swt;
Sol1[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*(1.0-celdx)*(1.0-celdy);
break;
case 1:
/* Calc node number */
m3=m10*ynumy+m20+1;
/* Add shear viscosity Nu */
if (celdx<0.5 && celdy>0.5)
{
dx=1.0-2.0*celdx;
dy=2.0*celdy-1.0;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vy and My from markers */
if (celdy>0.5)
{
dx=1.0-ABSV(celdx-0.5);
dy=celdy;
swt=swt1*dx*dy;
Mvy0[m3]+=markvy[mm1]*mro*swt;
Mry0[m3]+=mro*swt;
Sol1[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*(1.0-celdx)*celdy;
break;
case 2:
/* Calc node number */
m3=(m10+1)*ynumy+m20;
/* Add shear viscosity Nu, Sxy */
if (celdx>0.5 && celdy<0.5)
{
dx=2.0*celdx-1.0;
dy=1.0-2.0*celdy;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
/* Add Vx and Mx from markers */
if (celdx>0.5)
{
dx=celdx;
dy=1.0-ABSV(celdy-0.5);
swt=swt1*dx*dy;
Mvx0[m3]+=markvx[mm1]*mro*swt;
Mrx0[m3]+=mro*swt;
Sol0[nodenum2+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*celdx*(1.0-celdy);
break;
case 3:
/* Calc node number */
m3=(m10+1)*ynumy+m20+1;
/* Add shear viscosity Nu */
if (celdx>0.5 && celdy>0.5)
{
dx=2.0*celdx-1.0;
dy=2.0*celdy-1.0;
swt=swt1*dx*dy;
Nu0[m3]+=mnu*swt;
Gg0[m3]+=mnu/mgg*swt;
Sxye0[m3]+=msxye*swt;
Exye0[m3]+=mexye*swt;
Sol0[nodenum+m3]+=swt;
}
// Add values to central node once
// Braces limit the scope of these variables, but currently make no functional difference
{
dx=1.0-2.0*ABSV(celdx-0.5);
dy=1.0-2.0*ABSV(celdy-0.5);
swt=swt1*dx*dy;
Nd0[m3]+=mnu*swt;
Gd0[m3]+=mnu/mgg*swt;
Sxxe0[m3]+=msxxe*swt;
Sppe0[m3]+=markp[mm1]*swt;
Sbritn0[m3]+=msbrit[mm1]*swt; // Yield stress in the pressure node
Exxe0[m3]+=mexxe*swt;
Dro0[m3]+=mdro*swt;
Drp0[m3]+=maa*swt;
Sol1[nodenum+m3]+=swt;
}
// Calculate standard weight for physical properties
swt=swt1*celdx*celdy;
break;
}
// End switch of weight calculation
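/*
 * Added note: the four switch cases implement bilinear (tent-function)
 * weighting of one marker into its four surrounding nodes; swt1 = 1/(dx*dy)
 * normalizes by the cell area, and the celdx/celdy < 0.5 tests route the
 * staggered quantities (shear viscosity, Vx, Vy) to the nearest staggered
 * node only.
 */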
/* Add Physical Properties: ro,nu, etc. */
// fprintf(fp_log,"num=%ld type=%d x=%e y=%e cell=%ld dx=%e dy=%e swt=%e",mm1,mm2,markx[mm1],marky[mm1],m3,dx,dy,swt);getchar();
// nu0[m3]+=mnu*swt;
// ATAT TARAS: Why use mcp*mro? The routines calculate purely from markcp... not done in Manuele's version; later in heat it is mrocp, but that is unrelated
Ro0[m3]+=mro*swt;
Et0[m3]+=mbb*swt;
Cp0[m3]+=mcp*mro*swt;
Kt0[m3]+=mkt*swt;
Ht0[m3]+=mht*swt;
Sol0[m3]+=swt;
/* Add T */
if(!markim[mm2])
{
Tk[m3]+=mtk*swt;
Sol1[m3]+=swt;
}
}
/* End Interpolation from markers to nodes ====================================*/
}
}
// Add the per-thread interpolation arrays into the shared ones and free their memory
#pragma omp critical (sumsolarrays)
{
for (m3=0;m3<nodenum;m3++)
{
nu0[m3]+=Nu0[m3];
nd0[m3]+=Nd0[m3];
gd0[m3]+=Gd0[m3];
gg0[m3]+=Gg0[m3];
ro0[m3]+=Ro0[m3];
cp0[m3]+=Cp0[m3];
kt0[m3]+=Kt0[m3];
ht0[m3]+=Ht0[m3];
dro0[m3]+=Dro0[m3];
drp0[m3]+=Drp0[m3];
sxxe0[m3]+=Sxxe0[m3];
sxye0[m3]+=Sxye0[m3];
sppe0[m3]+=Sppe0[m3];
sbritn0[m3]+=Sbritn0[m3];
exxe0[m3]+=Exxe0[m3];
exye0[m3]+=Exye0[m3];
et0[m3]+=Et0[m3];
tk0[m3]+=Tk[m3];
mrx0[m3]+=Mrx0[m3];
mry0[m3]+=Mry0[m3];
mvx0[m3]+=Mvx0[m3];
mvy0[m3]+=Mvy0[m3];
sol0[m3]+=Sol0[m3];
sol1[m3]+=Sol1[m3];
sol0[nodenum+m3]+=Sol0[nodenum+m3];
sol1[nodenum+m3]+=Sol1[nodenum+m3];
sol0[nodenum2+m3]+=Sol0[nodenum2+m3];
sol1[nodenum2+m3]+=Sol1[nodenum2+m3];
}
}
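/*
 * Added note: this critical section is a manual array reduction. Each thread
 * accumulates into its private calloc'ed copies during the marker loop and
 * adds them into the shared nodal arrays exactly once here, so the hot loop
 * needs no atomics on the nodal arrays.
 */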
// Free dynamically allocated interpolation arrays
free(Nu0);
free(Nd0);
free(Gg0);
free(Gd0);
free(Ro0);
free(Sxxe0);
free(Sppe0);
free(Sbritn0);
free(Sxye0);
free(Exxe0);
free(Exye0);
free(Et0);
free(Dro0);
free(Drp0);
free(Cp0);
free(Kt0);
free(Ht0);
free(Tk);
free(Mrx0);
free(Mry0);
free(Mvx0);
free(Mvy0);
free(Sol0);
free(Sol1);
}
// End OMP section marker to node interpolation
#if setup>9
if (n0==1){fclose(flfric);}
#endif
if (printmod==10000) fprintf(fp_log,"\n Time taken for rho and vis calc + M->N1 in ronurecalc = %e s \n",omp_get_wtime()-start);
start=omp_get_wtime();
/* Recalculate ro[] nu[] */
for (m1=0;m1<xnumx;m1++)
{
for (m2=0;m2<ynumy;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
/* Shear viscosity recalc check */
if(sol0[nodenum+m3])
{
// Boundary Condition Viscosity (set in mu)
if(mu[m3] && (timesum<timebond || m1<=2 || m2<=2 || m1>=xnumx-4 || m2>=ynumy-3))
{
// BC value defined in init.t3c
if(mu[m3]>0)
{
nu0[m3]=mu[m3];
}
else
{
nu0[m3]/=sol0[nodenum+m3];
if(nu0[m3]>-mu[m3]) nu0[m3]=-mu[m3];
}
}
// Rest; solution
else
{
nu0[m3]/=sol0[nodenum+m3];
}
/* Min,Max NU limitation */
if(nu0[m3]<nubeg) nu0[m3]=nubeg; if(nu0[m3]>nuend) nu0[m3]=nuend;
/* Min,Max NU definition for nu contrast limit */
if(numin==0 || nu0[m3]<numin) numin=nu0[m3]; if(numax==0 || nu0[m3]>numax) numax=nu0[m3];
nu[m3]=nu0[m3];
/* Elastic shear stress Sxy recalc */
sxye[m3]=sxye0[m3]/sol0[nodenum+m3];
exye[m3]=exye0[m3]/sol0[nodenum+m3];
/* Shear shear modulus recalc */
gg[m3]=nu[m3]/(gg0[m3]/sol0[nodenum+m3]);
/* Reset weight */
sol0[nodenum+m3]=0;
}
/* Normal viscosity recalc check */
if(sol1[nodenum+m3])
{
if(mu[m3] && (timesum<timebond || m1<=2 || m2<=2 || m1>=xnumx-4 || m2>=ynumy-3))
{
if(mu[m3]>0)
{
nd0[m3]=mu[m3];
}
else
{
nd0[m3]/=sol1[nodenum+m3];
if(nd0[m3]>-mu[m3]) nd0[m3]=-mu[m3];
}
}
else
{
nd0[m3]/=sol1[nodenum+m3];
}
/* Min,Max NU limitation */
if(nd0[m3]<nubeg) nd0[m3]=nubeg; if(nd0[m3]>nuend) nd0[m3]=nuend;
/* Min,Max NU definition for nu contrast limit */
if(numin==0 || nd0[m3]<numin) numin=nd0[m3]; if(numax==0 || nd0[m3]>numax) numax=nd0[m3];
nd[m3]=nd0[m3];
/* Elastic Normal stress recalc */
sxxe[m3]=sxxe0[m3]/sol1[nodenum+m3];
sppe[m3]=sppe0[m3]/sol1[nodenum+m3];
sbritn[m3]=sbritn0[m3]/sol1[nodenum+m3];
exxe[m3]=exxe0[m3]/sol1[nodenum+m3];
/* Density changes recalc */
dro[m3]=dro0[m3]/sol1[nodenum+m3];
drp[m3]=drp0[m3]/sol1[nodenum+m3];
/* Normal shear modulus recalc */
gd[m3]=nd[m3]/(gd0[m3]/sol1[nodenum+m3]);
/* Reset weight */
sol1[nodenum+m3]=0;
}
/* Vx Mx recalc check */
if(sol0[nodenum2+m3])
{
/* Material constants recalc */
mvx[m3]=mvx0[m3]/mrx0[m3];
mrx[m3]=mrx0[m3]/sol0[nodenum2+m3];
sol0[nodenum2+m3]=0;
}
/* Vy My recalc check */
if(sol1[nodenum2+m3])
{
/* Material constants recalc */
mvy[m3]=mvy0[m3]/mry0[m3];
mry[m3]=mry0[m3]/sol1[nodenum2+m3];
sol1[nodenum2+m3]=0;
}
/* Other variables recalc check */
if(sol0[m3])
{
/* Material constants recalc */
ro[m3]=ro0[m3]/sol0[m3];
#if setup>9
if(gy[m2]<waterlev && ro[m3]<1000.1) ro[m3]=1.0;
if(gy[m2]>=waterlev && ro[m3]<1000.1) ro[m3]=1000.0;
#endif
et[m3]=et0[m3]/sol0[m3];
cp[m3]=(cp0[m3]/sol0[m3])/ro[m3];
kt[m3]=kt0[m3]/sol0[m3];
ht[m3]=ht0[m3]/sol0[m3];
/* Advective addition for T K in nodes recalc */
if (sol1[m3])
{
tk[m3]=tk0[m3]/sol1[m3];
sol1[m3]=0;
}
/* Reset weight */
sol0[m3]=0;
}
}
}
if (printmod) fprintf(fp_log,"Min, Max viscosity %e %e \n",numin,numax); fflush(fp_log);
/* Reset advective temperature */
for (m3=0;m3<nodenum;m3++) {tk3[m3]=0;}
/* Check Upper/Lower limits for nu[] after given contrast */
if(nucontr>1.0 && numin>0) numax=numin*nucontr;
if(nucontr<1.0 && numax>0) numin=numax*nucontr;
for (m3=0;m3<nodenum;m3++)
{
if(nu[m3]<numin) nu[m3]=numin; if(nu[m3]>numax) nu[m3]=numax;
if(nd[m3]<numin) nd[m3]=numin; if(nd[m3]>numax) nd[m3]=numax;
}
/* Water/air density */
#if setup>9
for (m1=0;m1<xnumx;m1++)
for (m2=0;m2<ynumy;m2++)
{
m3=m1*ynumy+m2;
if(gy[m2]<waterlev && ro[m3]<1000.1) ro[m3]=1.0;
if(gy[m2]>=waterlev && ro[m3]<1000.1) ro[m3]=1000.0;
}
#endif
/* ---> 5. Set Boundary conditions for T <---*/
if (printmod) fprintf(fp_log,"\n AVERAGE TEMPERATURE CORRECTION FOR BOUNDARY CONDITIONS ...\n"); fflush(fp_log);
tkrecalc();
if (printmod) fprintf(fp_log,"AVERAGE TEMPERATURE OK!\n"); fflush(fp_log);
/* Adiabat computation (currently disabled via 1==0) */
if(1==0 && timesum<3.15576e+7*1e+3)
{
/* Lower boundary TK - Node Cycle */
for (m1=0;m1<xnumx;m1++)
{
/* Cur Line Num in bondm[] */
m2=(m1+1)*ynumy-1;
m3=bondm[m2+nodenum3];
if(m3)
{bondv[m3][0]=tk[m2-1]*2.0-tk[m2-2];}
}
}
if (printmod==10000) fprintf(fp_log,"\n Time taken for M->N2 in ronurecalc = %e s \n",omp_get_wtime()-start);
/* END ADD MARKERS TO THE v-CELLS ========================== */
}
/* End ro[],nu[] recalc after marker positions - routine */
/* Calc density for given P,T */
void dencalcomp(double mtk, double mpb, double x, double y, int mm2, double *mro, double *mbb, double *maa)
/* mtk - T, K */
/* mpb - P, bar */
/* x,y - XY location of point for Vx,Vy calc */
/* mm2 - Rock number */
{
/* Ro=ro0*(1-bro*(TK-298.15))*(1+aro*(Pkbar-0.001)) */
*mro=markro[mm2]*(1.0-markbb[mm2]*(mtk-298.15))*(1.0+markaa[mm2]*(mpb-1.0)*1e-3);
/* Adiabatic term: al=bro/(1-bro*(Tk-298.15)) */
*mbb=markbb[mm2]/(1.0-markbb[mm2]*(mtk-298.15));
/* Compressibility: be=aro/(1+aro*(Pkbar-0.001)) */
*maa=1.e-8*markaa[mm2]/(1.0+markaa[mm2]*(mpb-1.0)*1e-3);
/* Constant density */
if (densimod==0) *mro=markro[mm2];
}
/* End OMP Calc density for given P,T */
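/* Added example: at the reference state mtk = 298.15 K, mpb = 1 bar both
 * correction factors equal 1, so *mro reduces to the reference density
 * markro[mm2] and *maa to exactly 1e-8*markaa[mm2]. */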
/* OMP Antigorite weakening of mantle */
void antigoromp(double mtk, double mpb, double x, double y, long int mm1, long int m10, char markt[])
/* mtk - T, K */
/* mpb - P, bar */
/* x,y - XY location of point for Vx,Vy calc */
/* mm1 - mark number */
/* m10 - Up Left Node X,Y Num */
{
/* Val buffer */
double k1,sy1,e,hydry,yfiltr,hydryl,tsubd,vxs,vys;
/* Check marker type */
if(markt[mm1]!=11 && markt[mm1]!=13) return;
/* Relativ Normalized coord Calc */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
/* Erosion surface; oceanic crust top */
sy1=(e*ep[m10+1]+(1.0-e)*ep[m10]);
/* Antigorite weakening of mantle above oceanic crust */
/* Atg stability field after Schmidt and Poli, 1998 */
if((y-sy1)>63000.0)
{
k1=1013.17699-0.060387633e-3*(y-sy1)-0.004289442e-6*(y-sy1)*(y-sy1);
}
else
{
k1=751.490422+6.00773668e-3*(y-sy1)-0.034690759e-6*(y-sy1)*(y-sy1);
}
/* Change marker Type */
/* Serpentinized (13) - to hydrated (11) */
if(k1<=mtk && markt[mm1]==13) markt[mm1]=11;
/* Hydrated(11) - to serpentinized (13) */
if(k1>mtk && markt[mm1]==11) markt[mm1]=13;
}
/* OMP End Antigorite weakening of mantle */
/* Nu calc after reological equation */
// Uses timestepe or computational visco-elastic timestep
/* P-T-stress dependent rheology without/with brittle/ductile transition */
/* Reological equations */
/* Stress>SScr */
/* Power law dislocation creep: SSii={NU0*EEii*exp[(E+PV)/RT]}^(1/n) */
/* Effective viscosity: NU=1/2*{NU0*exp[(E+PV)/RT]}^(1/n)*EEii^[(1-n)/n] */
/* Stress<SScr */
/* Newtonian diffusion creep: SSii=NU1*EEii*exp[(E+PV)/RT] */
/* Effective viscosity: NU=NU0/2*exp[(E+PV)/RT] */
/* NU1=NU0/SScr^(n-1) */
/* SScr - dislocation, diffusion transition stress */
/* SSii - second invariant of deviatoric stress tensor */
/* EEii - epsin - second invariant of strain rate tensor */
/* E - activation energy, J */
/* V - activation volume, J/bar */
/* R - gas constant, 8.314 J/mol/K */
/* Viscosity NU calc after reological equations */
/* NU=SSii/(2*EEii) */
/* Brittle - Ductile transition */
/* sbrit=MINV(0.85e+5*pb,60e+6+0.6e+5*pb)*lambda; (Schott & Schmeling, 1998) */
/* sbrit=MINV(0.667e+5*pb,51.2e+6+0.512e+5*pb)*lambda; (Brace & Kohlsstedt, 1980) */
void viscalcomp(double mtk, double mpb, double cmx, double cmy, double Markv, double Markwa, double Markk, double Markp, double Markt, double Markexx, double Markexy, double Markxx[], double Markxy[], double Marke[], long int mm1, int mm2, int yn, long int m10,double *mnu, double *mdi0)
/* mtk - T, K */
/* mpb - P, bar */
/* cmx,cmy - XY location of point for Vx,Vy calc */
/* mm1 - Marker number */
/* mm2 - rock type */
/* yn - plastic reset yes(1)/no(0) - switch from version 1 to 2 ! */
// bbrit_cor - slip velocity dependent correction for friction coefficient
{
/* Val buffer */
double e,n,rt=8.314*mtk,k1,e1,epsin,sigin,sbrit,nueff,strain,abrit,bbrit,nubrit,nunewt,nupowl,nuduct;
/* Rheological Eq par */
double lamb,xelvis,siginnew,mnu0,mnu1,mnu2,siginnew0,siginnew1,siginnew2,dsiginnew0,dsiginnew1,dsiginnew2;
/* Counters */
long int m1;
int ncount=0;
// Slip velocity dependent friction
double relvw=60,dvw,dvs;
/* Melted rocks */
// But incoming mm2 is already mm2_actual-20
#if setup>9
if (mm2>20) { *mnu = markn0[mm2]; return; }
#endif
/* Non-melted rocks, mm2 <= 20 */
/* Calc effective strain rate and stress from the second strain rate tensor invariant EEii=(1/2*SUM(EPSik^2))^(1/2) */
// Interpolation to these markers done at end of viterate() of previous timestep
epsin=pow(Markexx*Markexx+Markexy*Markexy,0.5);
sigin=pow(Markxx[mm1]*Markxx[mm1]+Markxy[mm1]*Markxy[mm1],0.5);
/* --- 1. Calculate components of brittle strength; cohesion, friction, and hydrostatic pore pressure weakening factor --- */
// - Lambda brittle weakening factor for hydrostatic pore pressure -
/* Up Left Node X,Y Num */
m1=m10;
/* Relative normalized coord calc */
e=(cmx-gx[m1])/(gx[m1+1]-gx[m1]);
n=(e*ep[m1+1]+(1.0-e)*ep[m1]);
// Pore fluid pressure correction: lamb = Pf/Ps
lamb=markll[mm2];
#if setup>9
// Predefine fluid pressures near surface
if ((cmy-n)<=0) lamb=hidrl;
if ((cmy-n)>0 && (cmy-n)<hidry) lamb=hidrl*(1.0-(cmy-n)/hidry)+lamb*(cmy-n)/hidry;
// Lower friction in fluid/melt present areas
if (Markwa==1) {lamb=lambfld;}
#endif
/* - Strain weakening - */
strain=Marke[mm1];
/* A,B coefficients calc depending on integral strain */
abrit=marka0[mm2];
bbrit=markb0[mm2];
if(strain>marke1[mm2])
{
abrit=marka1[mm2];
bbrit=markb1[mm2];
}
else
{
if(strain>marke0[mm2] && marke1[mm2]>marke0[mm2])
{
abrit=marka0[mm2]+(marka1[mm2]-marka0[mm2])*(strain-marke0[mm2])/(marke1[mm2]-marke0[mm2]);
bbrit=markb0[mm2]+(markb1[mm2]-markb0[mm2])*(strain-marke0[mm2])/(marke1[mm2]-marke0[mm2]);
}
}
/* --- End calculation of brittle strength components; cohesion, friction, and hydrostatic pore pressure weakening factor --- */
/* --- Start Peierls creep viscosity calculation --------------------------------------*/
/* Computed in place (no separate function): inputs mtk, mm2, epsin, sigin;
outputs siginnew (Peierls stress estimate) and nupeierls (Peierls creep viscosity) */
/* Peierls plasticity-creep mechanism, data from Katayama and Karato, 2008 */
double nupeierls = 0;
int peierls_on = 0;
/*
* from function arguments: mm2, mtk,
* declared previously: sigin, epsin
*/
if (global_peierls_creep && mm2 > 1 && mm2 < 20 && sigin > 1e+8 &&
mtk < 1373.0 && epsin > 0) {
int n1 = 9;
peierls_on = 1;
/* Constant f(p,q), 1/s/MPa^2, converted to 1/s/Pa^2 by the 1e-12 factor */
double const A = pow(10.0,7.8) * 1e-12;
/* Dry Peierls stress at 0 K f(p,q), 9.1e+9 Pa; Evans & Goetze, 1979 */
double sig0 = 9.1e+9;
/* Disabled branch (0==1): wet Peierls parameters */
if (0==1 && mm2!=9 && mm2!=10 && mm2!=14) {
/* Wet Peierls stress at 0 K f(p,q), MPa
* Katayama & Karato, 2008 */
sig0 = 2.9e+9;
n1 = 11;
}
/* Using bisection */
double sigmin = 1e+6;
double sigmax = sig0 * 0.9999;
/*
 * globals used here (shared in the OMP pragma): markdh, markdv; mpb is a function argument
 */
double const k1min = A * pow(sigmin, 2.0) * exp(MAXV(-100.0,-(markdh[n1] + markdv[n1] * mpb) / rt * pow(1.0 - sigmin / sig0, 2.0)));
for (int n2 = 0; n2 < 15; n2++) {
siginnew = (sigmax + sigmin) / 2.0;
/* siginnew=pow(sigmax*sigmin,0.5); */
k1 = A * pow(siginnew, 2.0) * exp(MAXV(-100, -(markdh[n1] + markdv[n1] * mpb) / rt * pow(1.0 - siginnew / sig0, 2.0)));
if((k1<epsin && k1min<epsin) || (k1>epsin && k1min>epsin)) {
sigmin = siginnew;
} else {
sigmax = siginnew;
}
/*
* p=1.0;
* q=2.0;
* k1 = A * pow(siginnew,2.0)*exp(-(markdh[n1]+markdv[n1]*mpb)/rt*pow(1.0-pow(siginnew/sig0,p),q));
* getchar();
* printf("%d %ld %d %e %e %e %e %e %e %e %e %e %e",n2,mm1,mm2,mtk,mpb,sigin,epsin,siginnew,k1,k1min,k1max,sigmin,sigmax);getchar();
*
*/
//printf("%ld %d %e %e %e %e %e %e",mm1,mm2,mtk,mpb,sigin,epsin,siginnew,1.0/nupeierls);
//static double const p = 1.0, q = 2.0;
//printf("%d %ld %d %e %e %e %e %e %e %e %e %e %e",n2,mm1,mm2,mtk,mpb,sigin,epsin,siginnew,k1,k1min,k1max,sigmin,sigmax);
}
//nupeierls = 1.0 / (0.5 * siginnew / epsin);
// try not using the inverse viscosity
nupeierls = (0.5 * siginnew / epsin);
/*
* if(nupeierls>1e-23 && (markx[mm1]<5e+4 || markx[mm1]>xsize-5e+4)) nupeierls=1e-23;
*/
}
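/* Added illustrative sketch (not called by the model): the bisection scheme used
above, written for a generic residual f(s) that is monotonic in s. Fifteen halvings
shrink the stress bracket by 2^15, which is what the fixed n2<15 loop above relies
on. All names are hypothetical. */
static double bisect_sketch(double (*f)(double), double target, double smin, double smax, int nit)
{
double s=smin,fs,fmin=f(smin);
int i;
for(i=0;i<nit;i++)
{
s=0.5*(smin+smax);
fs=f(s);
/* Keep the half-interval whose endpoints still bracket the target */
if((fs<target)==(fmin<target)) {smin=s; fmin=fs;} else smax=s;
}
return s;
}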
/* --- End Peierls creep viscosity calculation --------------------------------------*/
/* --- Start ductile viscosity calculation -------------------------------------------*/
/* Inverted value of newtonian NU set */
nunewt=0;
/* Inverted value of power-law NU set */
nupowl=0;
/* Check for the presence of ductile rheology */
// For more viscosity options, see codes of version 1
if (marknu[mm2])
{
/* A) Simple Newtonian rheology */
// - used in laboratory model of van Dinther et al., JGR, 2013a -
/* Newtonian creep: SSii=NU0*2.0*EEii */
/* Effective viscosity: NU=NU0 */
/* Effective viscosity member in Stokes: NUs=NU */
if(markdh[mm2]==0 && markdv[mm2]==0 && (markss[mm2]==0 || markmm[mm2]==1.0))
{
/* Inverted value of newtonian NU calc */
nunewt=1.0/marknu[mm2];
}
/* --> D) P-T-stress dependent rheology without/with brittle/ductile transition <--*/
// - used in large-scale models PhD thesis van Dinther -
/* Rheological equations */
/* Stress>SScr */
/* Power law dislocation creep: SSii={NU0*EEii*exp[(E+PV)/RT]}^(1/n) */
/* Effective viscosity: NU=1/2*{NU0*exp[(E+PV)/RT]}^(1/n)*EEii^[(1-n)/n] */
/* Effective viscosity member in Stokes: NUs=NU/n */
/* Stress<SScr */
/* Newtonian diffusion creep: SSii=NU1*EEii*exp[(E+PV)/RT] */
/* Effective viscosity: NU=NU1/2*exp[(E+PV)/RT] */
/* Effective viscosity member in Stokes: NUs=NU */
/* NU1=NU0/SScr^(n-1) */
if(marknu[mm2]>0 && (markdh[mm2]!=0 || markdv[mm2]!=0) && markss[mm2]!=0 && markmm[mm2]!=1.0)
{
// ---> 2. Calculate ductile viscosity <---
/* T-P exponent for effective NU calc */
e1=(markdh[mm2]+markdv[mm2]*mpb)/rt;
if(e1>150.0) e1=150.0;
e1=exp(e1);
/* Coefficient NU1 for stress-independent (diffusion) creep calc */
k1=marknu[mm2]/pow(markss[mm2],markmm[mm2]-1.0);
/* Inverted value of newtonian NU calc for diffusion creep */
nunewt=1.0/(0.5*k1*e1);
mnu2=nunewt;
/* Effective viscosity1 calc */
siginnew1=siginnew=sigin;
nupowl=0;
// Calculate dislocation creep viscosity
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
// Combine dislocation and diffusion creep (inverse viscosities add) into the effective ductile viscosity
mnu0=1.0/(mnu1+mnu2);
// ---> 3. Include elastic part for estimating future viscoelastic stresses <---
// Calculate visco-elasticity factor
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
// Calculate viscoelastic stress
siginnew2=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
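// Added note: the two lines above are the analytic stress update of a Maxwell
// visco-elastic body over one timestep dt=timestepe:
// xelvis=G*dt/(G*dt+NU), SSnew=2*NU*EEii*xelvis+SSold*(1-xelvis),
// so xelvis->1 (purely viscous stress 2*NU*EEii) for dt>>NU/G and
// xelvis->0 (stress frozen at SSold) for dt<<NU/G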
dsiginnew1=siginnew2-siginnew1;
/* Effective viscosity2 calc */
// Same update as above, now repeated with the viscoelastic stress estimate
siginnew=siginnew2;
nupowl=0;
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
mnu0=1.0/(mnu1+mnu2);
// Calculate visco-elasticity factor
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
// Calculate viscoelastic stress
siginnew=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
dsiginnew2=siginnew-siginnew2;
/* ---> 4. Local iterations for dislocation viscosity calculation by Bisection method <--- */
ncount=0;
// Locally iterate over nupowl and siginnew until siginnew-siginnew0<10 Pa (or 100 iterations)
do
{
// Guard against the degenerate case of no stress change at all (sigma_2_1 = sigma_1_1 = sigma_0); then the update is skipped - this almost never happens
dsiginnew0=ABSV(dsiginnew1)+ABSV(dsiginnew2);
if(dsiginnew0>0)
{
// Weight factor: 0.5 = midpoint
dsiginnew0=0.5;
// Calculate midpoint = new stress estimate
siginnew0=siginnew=siginnew1*(1.0-dsiginnew0)+siginnew2*dsiginnew0;
// Update viscosity with that new stress
nupowl=0;
if (siginnew>0) nupowl=1.0/(0.5*siginnew*marknu[mm2]*e1/pow(siginnew,markmm[mm2]));
mnu1=nupowl;
mnu0=1.0/(mnu1+mnu2);
// Update stress estimate with new viscosity
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+mnu0);
siginnew=2.0*mnu0*epsin*xelvis+sigin*(1.0-xelvis);
// Calculate difference between new and last stress estimate -> converging?
dsiginnew0=siginnew-siginnew0;
// Use this newest estimate for stress change to see if in same direction
// If yes; keep going in that direction; leave oldest estimate behind
if((dsiginnew0>=0 && dsiginnew1>=0) || (dsiginnew0<0 && dsiginnew1<0))
{
siginnew1=siginnew0;
dsiginnew1=dsiginnew0;
}
// If in opposite direction; passed optimal stress so turn back; leave newest 1 estimate behind
else
{
siginnew2=siginnew0;
dsiginnew2=dsiginnew0;
}
}
ncount++;
}
while(ABSV(dsiginnew0)>10.0 && ncount<101);
}
}
/* --- End Ductile viscosity calculation -------------------------------------------*/
// Combine diffusion and dislocation creep into the ductile effective viscosity
nueff = 1.0 / (nunewt + nupowl);
// Add Peierls creep viscosity limiter
if (peierls_on && nupeierls < nueff) nueff = nupeierls;
/* Mantle viscosity */
#if setup > 9
if((Markt==9 || Markt==10) && timesum<3.15576e+7*1e+4 && nueff<1e+20) nueff=1e+20;
#endif
if (nueff <nubeg) {
nueff = nubeg;
}
if (nueff > nuend) {
nueff = nuend;
}
if (nueff <markn0[mm2]) {
nueff = markn0[mm2];
}
if (nueff > markn1[mm2]) {
nueff = markn1[mm2];
}
nuduct=nueff;
*mdi0=0;
/* ------------------ Calculate viscoplastic viscosity ---------------------------- */
// Calculate brittle strength - sbrit -
// Plasticity switched off when both terms in the yield strength formulation are 0
if(((1-markll[mm2])*markb0[mm2] || abrit) && epsin)
{
// --- Strong slip velocity dependency of friction coefficient ---
// After Burridge and Knopoff (1967), Ampuero and Ben-Zion (2008), etc.
// Adapt friction parameters based on x-location (lab) or temperature (large-scale)
#if setup==10
if (mm2==7 && cmx>(700e3-shift_km) && cmx<(1150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup==11 // Including off-megathrust rate weakening
if (cmx>(700e3-shift_km) && cmx<(1150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup==12 // Collisional setup - L. Dal Zilio
if (cmx>(1700e3-shift_km) && cmx<(2150e3-shift_km) && cmy<100e3 && veldepfric==1)
#endif
#if setup < 10
if (mm2==5 && veldepfric==1)
#endif
{
// Calculate Relative amount of Velocity-Weakening vs Velocity-Strengthening
#if setup>9
// Velocity-strengthening region
if (Markk<=tk_updipsez0) // && mm2==7)
{ relvw = 0; }
// Transitions to seismogenic zone: updip
else if (Markk>tk_updipsez0 && Markk<tk_updipsez1) // && mm2==7)
{ relvw = (Markk-tk_updipsez0)/(tk_updipsez1-tk_updipsez0); }
// Velocity-weakening for Seismogenic Zone (and off-megathrust region)
else
{ relvw = 1;}
// Note for the off-events setup there is no strengthening outside the subduction channel of basaltic crust
#elif setup < 10
// Change mm2 locally in this viscalc routine, so that viscosity, shear modulus, Pf/Ps etc. also use it
// Seismogenic Zone = velocity-weakening
if (cmx >= end_updip && cmx <= start_downdip)
{ relvw = 1; mm2 = 6; }
// Transitions to seismogenic zone: updip
else if (cmx >= start_updip && cmx <= end_updip)
{ relvw = (cmx-start_updip)/(2*half_range);}
// Transitions away from seismogenic zone: downdip
else if (cmx >= start_downdip && cmx <= end_downdip)
{ relvw = 1 - ( cmx-start_downdip )/( 2*half_range );}
// Velocity-strengthening region
else
{ relvw = 0; }
#endif
// Calculate slip-rate dependent change of coefficients
mvslip[mm1] = 2.0*epsin*res_high;
dvw = (1-mgamma_vw)+mgamma_vw/(1.0+mvslip[mm1]/mvc_vw);
dvs = (1-mgamma_vs)+mgamma_vs/(1.0+mvslip[mm1]/mvc_vs);
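// Added note: dvw,dvs follow d(V)=(1-gamma)+gamma/(1+V/Vc), so d->1 for V<<Vc
// (static friction level) and d->1-gamma for V>>Vc; gamma thus sets the total
// slip-rate induced change of the friction level and Vc the characteristic velocity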
// Change friction coefficient accordingly
bbrit = mus_vs*dvs + relvw*(markb0[mm2]*dvw-mus_vs*dvs);
// Change cohesion as a function of slip velocity, if desired (if marka0[5] differs from marka0[6])
#if setup>9
abrit = marka0[mm2];
#elif setup < 10
abrit = marka0[5] + relvw*(marka0[6]-marka0[5]);
#endif
// Iterate locally to obtain stable estimate of slip-rate
sbrit=abrit+bbrit*(1-lamb)*Markp;
if(sbrit>0 && Markv>0)
{
for(ncount=0;ncount<5;ncount++)
{
if(sbrit>0)
{
// epsin = 0.5* sbrit/eta in viscous formulation
mvslip[mm1] = sbrit/Markv*res_high;
dvw = (1-mgamma_vw)+mgamma_vw/(1.0+mvslip[mm1]/mvc_vw);
dvs = (1-mgamma_vs)+mgamma_vs/(1.0+mvslip[mm1]/mvc_vs);
bbrit = mus_vs*dvs + relvw*(markb0[mm2]*dvw-mus_vs*dvs);
sbrit=abrit+bbrit*(1-lamb)*Markp;
}
else
{
fprintf(fp_log,"LOOK: Sbrit is <= 0 within v-w loop: %e, abrit = %e, bbrit = %e, pr = %e, markvis = %e, x = %e, y = %e \n",sbrit,abrit,bbrit,Markp,Markv,cmx,cmy); fflush(fp_log);
}
}
}
// Calculate average value stresses and strainrates seismogenic zone
// Note: proper stress and slip velocity are not available here yet, only the yield strength
#if setup < 10
if (relvw==1 && cmy<=gy[n_glayer+1])
{
sbrit_ave = sbrit_ave + (abrit + bbrit*(1-lamb)*Markp);
count_sezm = count_sezm + 1;
}
#endif
}
// In case of no rate dependency also calculate yield strength
else
{
sbrit=abrit+bbrit*(1-lamb)*Markp;
}
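// Added note: sbrit=abrit+bbrit*(1-lamb)*Markp is a Drucker-Prager yield stress
// sigma_y=C+mu_eff*(1-lambda)*P, with cohesion C=abrit, friction coefficient
// bbrit (possibly slip-rate dependent, see above) and pore pressure ratio lamb=Pf/Ps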
// Check strength values
if(sbrit<0) sbrit=0;
if(sbrit>marks1[mm2]) sbrit=marks1[mm2];
// Save frictional properties to file for analyses
// Save time and space by only doing at last timestep in prn output cycle
#if setup > 9
if (n0==1 && start_cond==1 && relvw<=1.0)
{ fprintf(flfric," %d %e %e %e %e %e %e %e %e \n", mm2, cmx, cmy, Markp, sbrit, Markxx[mm1], Markxy[mm1],lamb,bbrit); }
#endif
// Store yield stress for post-processing and interpolation to nodes
msbrit[mm1] = sbrit;
// Store old-stress
msii_old[mm1] = sigin;
/* ---> 5. ! Viscoelastic case ! <--- */
if(stoksmod && timestepe && epsin)
{
/* Future plastic creep */
/* Future stresses calc */
xelvis=markgg[mm2]*timestepe/(markgg[mm2]*timestepe+nueff);
siginnew=2.0*nueff*epsin*xelvis+sigin*(1.0-xelvis);
// Plastic yielding if new estimate or stress of previous timestep exceeds strength
if(sbrit<siginnew || sbrit<sigin)
{
/* Executing plasticity by resetting stresses and viscosities */
// Note yn is defined at call to this viscalc routine
if(yn==1)
{
/* XXX Density correction for the dilation angle XXX */
// We do not use dilation ! Not for any rock type
if(markf0[mm2]>0 && markf1[mm2]>0)
{
/* Second invariant of viscoplastic strain calc, check */
e1=Marke[mm1]-sbrit/2.0/markgg[mm2];
/* Correction of divergence rate for plastic strain rate */
if(e1<markf1[mm2])
{
e1=epsin-sbrit/2.0/nuduct;
if(e1) *mdi0=2.0*e1*markf0[mm2]*timestepe;
}
}
/* ! Recompute stress ! So stress no longer exceeds strength */
if(sigin && sbrit<sigin)
{
Markxx[mm1] *= sbrit/sigin;
Markxy[mm1] *= sbrit/sigin;
sigin=sbrit;
}
/* ! Recompute viscosity ! So decrease viscosity accordingly to localize deformation */
nubrit=sbrit/(2.0*epsin+(sigin-sbrit)/timestepe/markgg[mm2]);
if(nubrit<nueff) nueff=nubrit;
/* Set initial plastic strain */
if(Marke[mm1]<=0) Marke[mm1]=1e-20;
}
}
else
{
if(yn==1) Marke[mm1]=0;
}
}
}
/* ------------------ End calculation viscoplastic viscosity ---------------------------- */
/* Check calculated viscosity to be within hard-coded minimum and maximum */
if (nueff < nubeg) {
nueff = nubeg;
}
if (nueff > nuend) {
nueff = nuend;
}
if (nueff < markn0[mm2]) {
nueff = markn0[mm2];
}
if (nueff > markn1[mm2]) {
nueff = markn1[mm2];
}
// Pass final viscosity back to main model
*mnu = nueff;
}
/* Number of nearest left vertical line find */
long int m1serch(double cmx)
/* cmx - X coordinate */
{
/* Variables */
long int m1,m10=0,m11=xnumx-1;
/* Search cycle */
do
{
m1=(m10+m11)/2;
if (gx[m1]>cmx) m11=m1; else m10=m1;
}
while((m11-m10)>1);
if(m10>xnumx-2) m10=xnumx-2;
return m10;
}
/* Number of nearest left vertical line find */
/* Number of nearest upper horizontal line find */
long int m2serch(double cmy)
/* cmy - Y coordinate */
{
/* Variables */
long int m2,m20=0,m21=ynumy-1;
/* Search cycle */
do
{
m2=(m20+m21)/2;
if (gy[m2]>cmy) m21=m2; else m20=m2;
}
while((m21-m20)>1);
if(m20>ynumy-2) m20=ynumy-2;
return m20;
}
/* Number of nearest upper horizontal line find */
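/* Added note: m1serch() and m2serch() are plain binary searches over the
monotonically increasing grid line arrays gx[]/gy[]; they return the index m of
the nearest left/upper line (g[m]<=c<g[m+1] inside the grid), clamped at the
grid edges. A minimal sketch over an arbitrary monotonic array (names hypothetical): */
static long int cellsearch_sketch(double const *g, long int num, double c)
{
long int m,m0=0,m1=num-1;
do {m=(m0+m1)/2; if (g[m]>c) m1=m; else m0=m;} while((m1-m0)>1);
return m0>num-2 ? num-2 : m0;
}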
/* Erosion/Sedimentation Function for markers */
void erosmarkomp(long int mm1, int yn, long int m10, double x, double y, char markt[], double marke[], double markd[])
/* mm1 - marker number */
/* yn - current sediment type (2,3) */
/* m10 - Up Left Node X,Y Num */
{
/* Variables */
double e,e0;
/* Surface level elevation definition */
/* Relative normalized coord calc */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
/* Surface level elevation for marker definition */
e0=(e*ep[m10+1]+(1.0-e)*ep[m10]);
/* Marker surface elevation definition */
if(markt[mm1]<2)
{
/* Water/Air -> Sediments conversion */
if(y>e0) {markt[mm1]=yn; marke[mm1]=0; markd[mm1]=-1.0;}
}
if(markt[mm1]>1)
{
/* Rock->Water/Air conversion */
if(y<e0) {markt[mm1]=0; marke[mm1]=0; markd[mm1]=-1.0;}
}
}
/* OMP End Erosion/Sedimentation Function for markers */
/* OMP Rock to rock+melt transformation */
void meltingomp(double mtk, double mpb, long int mm1, int mm2, char Markt[], double Marke[], double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* mm1 - mark number */
{
/* Melting related change of the marker type */
/* Check marker type: clamp negative pressure for meltable types */
if (mm2==3 || mm2==4 || mm2==5 || mm2==6 || mm2==7 || mm2==8 || mm2==11 || mm2==16 || mm2==23 || mm2==24 || mm2==25 || mm2==26 || mm2==27 || mm2==28 || mm2==34 || mm2==36 || mm2==37 || mm2==38)
{ if (mpb<0) mpb=0; }
switch(mm2)
{
/* Sediments, upper crust */
case 3:
case 4:
case 5:
case 17:
case 23:
case 24:
case 25:
case 26:
case 37:
/* Basalt, Gabbro */
case 7:
case 8:
case 16:
case 6:
case 18:
case 27:
case 28:
case 36:
case 38:
// mxmelt and mhlatent are already pointers, so pass them on directly without &
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
if(*mxmelt>0 && mm2<20) {Markt[mm1]+=20; Marke[mm1]=0;}
if(*mxmelt<=0 && mm2>20) {Markt[mm1]-=20; Marke[mm1]=0;}
return;
/* Hydrated Peridotite */
case 11:
case 34:
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
if(*mxmelt>0 && mm2==11) {Markt[mm1]=34; Marke[mm1]=0;}
if(*mxmelt<=0 && mm2==34) {Markt[mm1]=14; Marke[mm1]=0;}
return;
/* Others */
default: return;
}
}
/* OMP End Rock to rock+melt transformation */
/* Melt fraction, density, viscosity, heat capacity calculation */
void meltpartomp(double mtk, double mpb, double x, double y, long int mm1, int mm2, double *mro,double *mbb, double *maa, double *mnu, double *mcp, double *mkt, double *mgg, double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* x,y - XY location of point for Vx,Vy calc */
/* mm1 - mark number */
/* mm2 - mark type */
{
/* Val buffer */
double xmelt=0,ival,dmpb,dmtk,sduct,nueff,smin,smax,nmin,nmax,cpadd=0,vx0,vy0,pr0,sp0,ee0;
long int m1,m10,m20;
double Mnu,mdi0;
double p_pl_out,p_ga_in,rokf,p_sp_in,p_ol_out,p_pv_in,p_sp_out,p_st_in;
m10=m1serch(x);
/* Check marker type */
if (mm2==23 || mm2==24 || mm2==25 || mm2==26 || mm2==27 || mm2==28 || mm2==34 || mm2==36 || mm2==37 || mm2==38)
{
/* Calculate melt fraction */
// mxmelt and mhlatent are already pointers, so pass them on directly without &
meltpart1omp(mtk,mpb,mm2,mxmelt,mhlatent);
xmelt = *mxmelt;
/* Standard adiabatic term: al=bro/(1-bro*(Tk-298.15)) */
*mbb=(markbb[mm2]*xmelt+markbb[mm2-20]*(1.0-xmelt))/(1.0-(markbb[mm2]*xmelt+markbb[mm2-20]*(1.0-xmelt))*(mtk-298.15));
*maa=(markaa[mm2]*xmelt+markaa[mm2-20]*(1.0-xmelt))/(1.0+(markaa[mm2]*xmelt+markaa[mm2-20]*(1.0-xmelt))*(mpb-1.0)*1e-3);
/* Density */
/* Ro=ro0 */
if (densimod==0)
{
*mro=markro[mm2]*xmelt+markro[mm2-20]*(1.0-xmelt);
}
/* Ro=ro0*(1-bro*(TK-298.15))*(1+aro*(Pkbar-0.001)) */
else
{
ival=1.0;
/* ========================= */
/* Mantle phase transitions */
/* ========================= */
/*
if(mm2>=29 && mm2<=34 && markex[mm1]>0) ival=1.0-0.04*markex[mm1];
*/
/* Eclogitization, St, Pv transitions in oceanic crust */
if(mm2>=27 && mm2<=28)
{
/* Eclogitization Ito and Kennedy, 1971 */
/*basalt=>garnet granulite (Ga-In) transition*/
p_ga_in=-9222.0+mtk*14.0;
/*Not to have granulites at pressure lower than 2 kbar*/
if(p_ga_in<2000.0) p_ga_in=2000.0;
/*garnet granulite=>eclogite (Pl-Out) transition*/
p_pl_out=-1460.0+mtk*20.0;
/*Not to have eclogites at pressure lower than 12 kbar*/
if(p_pl_out<12000.0) p_pl_out=12000.0;
if(mpb>p_ga_in)
{
rokf=0;
if(mtk>teclmin)
{
if(mtk>teclmax)
{
rokf=0.16;
}
else
{
rokf=0.16*(mtk-teclmin)/(teclmax-teclmin);
}
}
if(mpb>=p_pl_out)
{
ival=1.0+rokf;
}
else
{
ival=(1.0+rokf*(mpb-p_ga_in)/(p_pl_out-p_ga_in));
}
}
/* Coe->St transition Gerya et al., 2004, PCM */
p_st_in=59100.0+mtk*22.6;
if(mpb>p_st_in) ival*=1.06;
/* Pv transition, Mishin et al., 2008 with slope from Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=354000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=352000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.08;
if(mpb>=p_sp_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Ol-Sp and Pv transitions in the mantle */
if(mm2>=29 && mm2<=34)
{
/* Ol-Sp transition, Katsura & Ito, 1989 */
/* Ol-out transition*/
p_ol_out=91000.0+mtk*27.0;
/* Sp-in transition*/
p_sp_in=66000.0+mtk*39.0;
/*Limit width of Sp-Ol transition to 2 kbar */
if(p_sp_in>p_ol_out-2000.0) p_sp_in=p_ol_out-2000.0;
if(mpb>p_sp_in)
{
rokf=0.06;
if(mpb>=p_ol_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_sp_in)/(p_ol_out-p_sp_in));
}
}
/* Pv transition, Ito et al., 1990 */
/* Sp-out transition*/
p_sp_out=304000.0-mtk*40.0;
/* Pv-in transition*/
p_pv_in=302000.0-mtk*40.0;
if(mpb>p_pv_in)
{
rokf=0.11;
if(mpb>=p_sp_out)
{
ival*=1.0+rokf;
}
else
{
ival*=(1.0+rokf*(mpb-p_pv_in)/(p_sp_out-p_pv_in));
}
}
}
/* Density calculation with corrections */
*mro=xmelt*markro[mm2]*(1.0-markbb[mm2]*(mtk-298.15))*(1.0+markaa[mm2]*(mpb-1.0)*1e-3)+(1.0-xmelt)*ival*markro[mm2-20]*(1.0-markbb[mm2-20]*(mtk-298.15))*(1.0+markaa[mm2-20]*(mpb-1.0)*1e-3);
}
/**/
/* Viscosity */
/* Effective NU calc check */
/* Little melt */
// Treated like the no-melt case: go through viscalcomp()
if(xmelt<0.1)
{
// QUESTION TARAS - why a plastic reset here? (I switched yn to 1 wrt the old version; before it was set to 0 here)
// and the mm2 passed in is mm2-20? Since mm2>20 returns immediately, is it ok to pass mm2-20 here?
viscalcomp(mtk,mpb,markx[mm1],marky[mm1],markv[mm1],markwa[mm1],markk[mm1],markp[mm1],markt[mm1],markexx[mm1],markexy[mm1],markxx,markxy,marke,mm1,mm2-20,1,m10,&Mnu,&mdi0);
*mnu=Mnu;
*mgg=markgg[mm2-20];
}
/* Significant melt */
// Allowed to drop viscosity below minimum for rock type (init.t3c), but not below minimum for whole model (mode.t3c)
else
{
/* Set viscosity and stress limits */
nmin=MAXV(markn0[mm2],nubeg);
nmax=MINV(markn1[mm2],nuend);
smin=MAXV(marks0[mm2],strmin);
smax=MINV(marks1[mm2],strmax);
/* Calc effective strain rate after second strain rate tensor invariant EEii=(1/2SUM(EPSik^2))^(1/2) */
m20=m2serch(y);
allinteriomp(x,y,m10,m20,&vx0,&vy0,&pr0,&sp0,&ee0);
// ee0=pow(eps[6]*eps[6]+eps[4]*eps[4],0.5); (was epsin)
/* Effective NU calc check */
nueff=marknu[mm2]*exp(2.5+pow((1.0-xmelt)/xmelt,0.48)*(1.0-xmelt));
if(nueff<nmin) nueff=nmin;
if(nueff>nmax) nueff=nmax;
/* Ductile stress calc check */
sduct=nueff*2.0*ee0;
if(sduct<smin && ee0) {nueff=0.5*smin/ee0; sduct=smin;}
if(sduct>smax) {nueff=0.5*smax/ee0; sduct=smax;}
*mnu=nueff;
/* Shear modulus */
*mgg=markgg[mm2];
}
/* Heat capacity */
*mcp=markcp[mm2]*xmelt+markcp[mm2-20]*(1.0-xmelt);
/* heat conductivity */
*mkt=((markkt[mm2]+markkf[mm2]/(mtk+77.0))*exp(markkp[mm2]*mpb))*xmelt+((markkt[mm2-20]+markkf[mm2-20]/(mtk+77.0))*exp(markkp[mm2-20]*mpb))*(1.0-xmelt);
/* Additional melting adiabatic term, heat capacity */
if(xmelt>0 && xmelt<1.0)
{
/* Melting adiabatic term: alm=-ro*(dHlat/dP)/T */
/* Numerical differentiation */
dmpb=mpb*0.001;
meltpart1omp(mtk,mpb-dmpb,mm2,mxmelt,mhlatent);
ival= *mhlatent;
meltpart1omp(mtk,mpb+dmpb,mm2,mxmelt,mhlatent);
ival-= *mhlatent;
ival *= *mro / (mtk*2.0*dmpb*1e+5);
*mbb+=ival;
/* Melting heat capacity term: cpm=dHlat/dT */
/* Numerical differentiation */
dmtk=1.0;
meltpart1omp(mtk+dmtk,mpb,mm2,mxmelt,mhlatent);
ival= *mhlatent;
meltpart1omp(mtk-dmtk,mpb,mm2,mxmelt,mhlatent);
ival-= *mhlatent;
ival/=2.0*dmtk;
*mcp+=ival;
}
}
else
{
*maa= *mbb= *mxmelt= *mhlatent= *mro= *mnu= *mcp= *mkt= 0;
}
}
/* End Melt fraction, density, viscosity, heat capacity calculation */
/* Melt fraction, latent heat calculation */
void meltpart1omp(double mtk, double mpb, int mm2, double *mxmelt, double *mhlatent)
/* mtk - T, K */
/* mpb - P, bar */
/* mm2 - mark type */
/* mxmelt - melt fraction (output) */
/* mhlatent - latent heat, J/kg (output) */
{
/* Val buffer */
double xmelt=0,hlatent=0;
double ykm=mpb*3e-3,ts=0,tl=0;
/* Calculate melt fraction using marker type */
if (ykm>0)
switch(mm2)
{
/* Sediments: latent heat 300 kJ/kg (Bittner & Schmeling, 1995) */
case 3:
case 4:
case 5:
case 17:
case 23:
case 24:
case 25:
case 37:
/* Wet Solidus Temperature, Johannes, 1985, Poli & Schmidt, 2002 */
if (ykm<36.0)
{
ts=889.0+536.6/(ykm+1.609)+18.21/(ykm+1.609)/(ykm+1.609);
}
else
{
ts=831.3+2.0*ykm;
}
/* Dry Granite Liquidus, Johannes, 1985 */
tl=1262.0+3.0*ykm;
hlatent=300000.0;
break;
/* Basalt, Gabbro: latent heat 380 kJ/kg (Bittner & Schmeling, 1995) */
case 7:
case 8:
case 16:
case 27:
case 28:
case 36:
case 6:
case 18:
case 26:
case 38:
/* Wet solidus, Schmidt & Poli, 1998 */
if (ykm<48.0)
{
ts=972.6-2111.0/(ykm+10.63)+70033.0/(ykm+10.63)/(ykm+10.63);
}
else
{
ts=935.4+0.1162*ykm+0.006937*ykm*ykm;
}
/* Dry Tholeiitic Basalt Liquidus, Hess, 1989 */
tl=1423.15+3.5*ykm;
hlatent=380000.0;
break;
/* Peridotite: latent heat 400 kJ/kg Turcotte & Schubert, 1982, p.171 */
case 11:
case 34:
/* Wet solidus, Schmidt & Poli, 1998 */
if (ykm<72.0)
{
ts=1239.8+1493.0/(ykm+9.701);
}
else
{
ts=1266.3-0.3948*ykm+0.003893*ykm*ykm;
}
/* Dry Peridotite Liquidus, Hess, 1989 */
tl=2073.15+3.8*ykm;
hlatent=400000.0;
break;
/* Other rocks - No melting */
default:
break;
}
/* Melt fraction, latent heat calculation */
*mxmelt = *mhlatent = 0;
if(tl)
{
/* Melt fraction calc, check */
xmelt=(mtk-ts)/(tl-ts);
if(xmelt<0) xmelt=0;
if(xmelt>1.0) xmelt=1.0;
*mxmelt = xmelt;
/* Latent heat calc */
hlatent *= xmelt;
*mhlatent=hlatent;
}
}
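/* Added worked example: meltpart1omp() interpolates the melt fraction linearly
between wet solidus ts and dry liquidus tl, xmelt=(mtk-ts)/(tl-ts) clipped to
[0,1]. E.g. basalt (case 7) at ykm=10 km: ts=972.6-2111.0/20.63+70033.0/20.63^2
= 1035 K and tl=1423.15+35.0=1458 K, so mtk=1200 K gives xmelt=(1200-1035)/423=0.39
and a latent heat of 0.39*380 kJ/kg */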
/* End OMP Melt fraction, latent heat calculation */
/* Hydration front progress after H2O budget */
double hydration2omp()
{
/* Val buffer */
double ysurf,vfiltr,yfiltr,dydx,dydx1,sy1,sy2,sy3,sy4,sy5,e1,mwamin,x0,y0,x1,y1,vx1,vy1;
double hytimesum,hytimesum0;
/* TD Database variables */
double W0,W1,W2,W3,R0,R1,R2,R3,n,e,dx,dy;
double mtk,mpb,mwa,mro,dmwa,wro;
double Mgg,Mro,Mwa,Mcp,Mbb,Maa,Mdhh,Mkt;
long int m1,m2,m3,mm1,marknum1=marknum;
int mm2,mm3,n1,n2;
fprintf(fp_log,"\n WATER Transport BEGIN \n");fflush(fp_log);
/* Marker steps */
dx=dxwater;
dy=dywater;
/* Min water content in the hydrated mantle, wt% */
mwamin=0.1;
/* Min Distance from erosion surface for water release */
ysurf=8000.0;
/* Clear wa[] wt */
for (m1=0;m1<nodenum;m1++)
{
wa0[m1]=0;
wa1[m1]=0;
sol0[m1]=0;
sol1[m1]=0;
sol0[nodenum+m1]=1e+30;
sol1[nodenum+m1]=-1e+30;
sol0[nodenum2+m1]=1e+30;
sol1[nodenum2+m1]=-1e+30;
fre0[ m1]=1e+30;
fre0[nodenum +m1]=-1e+30;
fre0[nodenum2+m1]=1e+30;
fre0[nodenum3+m1]=-1e+30;
}
/* Fluid marker generation cycle */
double start=omp_get_wtime();
for (mm1=0;mm1<marknum;mm1++)
{
// Reset fluid presence indicator for the marker loops below
markwa[mm1] = 0;
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Erosion surface */
e1=(markx[mm1]-gx[m1])/(gx[m1+1]-gx[m1]);
sy1=(e1*ep[m1+1]+(1.0-e1)*ep[m1]);
/* Check markers out of grid and within hydration range */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && (markk[mm1]>0 || markt[mm1]>=50) && markt[mm1]<100)
if((markd[mm1])>=0 && (markw[mm1])>=0 && mm2>1 && mm2!=9 && mm2!=10)
{
if(mm2<50)
{
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=(markk[mm1]);
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m1,markt);
/* Rocks to rock+melt transformation */
if (markt[mm1]>=20)
{
/* Check melting extent */
if(fre0[ +m3]>markx[mm1]-dx) fre0[ m3]=markx[mm1]-dx;
if(fre0[nodenum +m3]<markx[mm1]+dx) fre0[nodenum +m3]=markx[mm1]+dx;
if(fre0[nodenum2+m3]>marky[mm1]-dy) fre0[nodenum2+m3]=marky[mm1]-dy;
if(fre0[nodenum3+m3]<marky[mm1]+dy) fre0[nodenum3+m3]=marky[mm1]+dy;
}
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mro=Mro;
mwa=Mwa;
/* Water changes in kg/m3 calc */
dmwa=mro*(mwa-markw[mm1])*1e-2;
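// Added note: markw[] and the equilibrium content mwa are in wt%, mro in kg/m^3,
// so dmwa=mro*(mwa-markw)*1e-2 is the water mass change per unit rock volume in
// kg/m^3; dmwa>0 means the rock can bind more water (consumption), dmwa<0 release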
//{fprintf(fp_log,"H2O MARKER %ld %d %d %e %e %e %e %e %e %e",mm1,mm2,mm3,mtk-273.15,mpb/1000.0,mwa,mro,markw[mm1],markd[mm1],dmwa);getchar();}
//{fprintf(fp_log,"H2O RELEASE %ld %d %d %e %e %e %e %e %e %e",mm1,mm2,mm3,mtk-273.15,mpb/1000.0,mwa,mro,markw[mm1],markd[mm1],dmwa);getchar();}
/* Add water changes to the current cell, kg/m3 */
/* Water release */
if ((markw[mm1]-mwa)>dmwamin)
{
/* Save new water content */
markw[mm1]=mwa;
/* Generation of fluid marker (NO FLUID from melts) */
if (markt[mm1]<20 && marky[mm1]>sy1)
{
markt[marknum1]=markt[mm1]+50;
markx[marknum1]=markx[mm1];
marky[marknum1]=marky[mm1];
markk[marknum1]=markk[mm1];
markd[marknum1]=1050.0;
markw[marknum1]=-dmwa;
// If the new marker is interesting for the picking algorithm, flag it to be followed
// Note this is hard-coded in i2.c as well. Only fluid markers (49<markt<100) qualify here, since immobile markers can not become fluid
if ( start_cond==1 && marky[marknum1]<85e3 && markx[marknum1]>gx[m10_hr] && markx[marknum1]<gx[m11_hr] && markt[marknum1]>49 && markt[marknum1]<100)
{
follow[marknum1]=2;
nmf++;
}
/* Increment additional markers counter (the check above must use the pre-increment index) */
marknum1++;
/* Check hydration extent */
if(sol0[nodenum+m3]>markx[mm1]-dx) sol0[nodenum+m3]=markx[mm1]-dx;
if(sol1[nodenum+m3]<markx[mm1]+dx) sol1[nodenum+m3]=markx[mm1]+dx;
if(sol0[nodenum2+m3]>marky[mm1]-dy) sol0[nodenum2+m3]=marky[mm1]-dy;
if(sol1[nodenum2+m3]<marky[mm1]+dy) sol1[nodenum2+m3]=marky[mm1]+dy;
}
}
else
/* Water consuming */
{
if(dmwa>0)
{
wa1[m3]+=dmwa;
sol1[m3]+=1.0;
}
}
}
else
/* Fluid marker count */
{
/* Check position */
if(marky[mm1]>sy1)
{
/* Check hydration extent */
if(sol0[nodenum+m3]>markx[mm1]-dx) sol0[nodenum+m3]=markx[mm1]-dx;
if(sol1[nodenum+m3]<markx[mm1]+dx) sol1[nodenum+m3]=markx[mm1]+dx;
if(sol0[nodenum2+m3]>marky[mm1]-dy) sol0[nodenum2+m3]=marky[mm1]-dy;
if(sol1[nodenum2+m3]<marky[mm1]+dy) sol1[nodenum2+m3]=marky[mm1]+dy;
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
}
/* Rock hydration cycle: rocks get hydrated by changing marker type mm2 */
start=omp_get_wtime();
for (mm1=0;mm1<marknum;mm1++)
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && markt[mm1]<50)
{
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Check markers within hydration range */
if(markx[mm1]>sol0[nodenum+m3] && marky[mm1]>sol0[nodenum2+m3] && (markx[mm1])<sol1[nodenum+m3] && (marky[mm1])<sol1[nodenum2+m3])
{
/* Fluid presence mark */
markwa[mm1]=1;
if(markt[mm1]==9 || markt[mm1]==10 || markt[mm1]==12 || markt[mm1]==14 || markt[mm1]==5 || markt[mm1]==6)
{
/* Mantle Hydration */
if (markt[mm1]!=5 && markt[mm1]!=6)
{
mm2=markt[mm1]=11;
}
else
{
mm2=markt[mm1]=markt[mm1]+12;
}
/* P, T parameters calc */
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=(markk[mm1]);
/* Mantle to Antigorite transformation */
antigoromp(mtk,mpb,markx[mm1],marky[mm1],mm1,m1,markt);
/* Rocks to rock+melt transformation */
if (markt[mm1]>=20)
{
/* Check melting extent */
if(fre0[ +m3]>markx[mm1]-dx) fre0[ m3]=markx[mm1]-dx;
if(fre0[nodenum +m3]<markx[mm1]+dx) fre0[nodenum +m3]=markx[mm1]+dx;
if(fre0[nodenum2+m3]>marky[mm1]-dy) fre0[nodenum2+m3]=marky[mm1]-dy;
if(fre0[nodenum3+m3]<marky[mm1]+dy) fre0[nodenum3+m3]=marky[mm1]+dy;
}
/* Thermodynamic database use for Ro as function of Water content */
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mro=Mro;
mwa=Mwa;
/* Water changes in kg/m3 calc */
dmwa=mro*(mwa-markw[mm1])*1e-2;
/* Add water changes to the current cell, kg/m3 */
/* Water consuming */
if (dmwa>0)
{
wa1[m3]+=dmwa;
sol1[m3]+=1.0;
}
}
}
}
/* Fluid marker computing cycle */
start=omp_get_wtime();
for (mm1=0;mm1<marknum1;mm1++)
{
/* Check markers out of grid and within hydration range */
if(markt[mm1]>=50 && markt[mm1]<100 && markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize)
{
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Erosion surface */
e1=(markx[mm1]-gx[m1])/(gx[m1+1]-gx[m1]);
sy1=(e1*ep[m1+1]+(1.0-e1)*ep[m1]);
/* Water in melt region conversion */
if(markd[mm1]<1100.0 && markx[mm1]>fre0[m3] && marky[mm1]>fre0[nodenum2+m3] && markx[mm1]<fre0[nodenum+m3] && marky[mm1]<fre0[nodenum3+m3]) markd[mm1]=1150.0;
/* Check position, no fluid above erosion/sedimentation level, no fluid passing through the melt */
if(marky[mm1]>sy1 && marky[mm1]<zdeep && (markd[mm1]<1100.0 || (markx[mm1]>fre0[m3] && marky[mm1]>fre0[nodenum2+m3] && markx[mm1]<fre0[nodenum+m3] && marky[mm1]<fre0[nodenum3+m3])))
{
wa0[m3]+=markw[mm1];
sol0[m3]+=1.0;
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
if (printmod==10000) fprintf(fp_log,"\n Time taken for fluid computing cycle = %e s \n",omp_get_wtime()-start);
/* Fluid marker consuming cycle */
start=omp_get_wtime();
for (mm1=0;mm1<marknum1;mm1++)
{
/* Marker type */
mm2=(int)markt[mm1]; if (mm2>=100) mm2-=100;
// Why needed? markers with markt>=100 are excluded below anyway..
/* Marker cell number */
m1=m1serch(markx[mm1]);
m2=m2serch(marky[mm1]);
m3=m1*ynumy+m2;
/* Change water consuming rocks and fluid makers */
if(markx[mm1]>0 && marky[mm1]>0 && (markx[mm1])<xsize && (marky[mm1])<ysize && (markk[mm1]>0 || markt[mm1]>=50) && markt[mm1]<100)
if((markd[mm1])>=0 && (markw[mm1])>=0 && mm2>1 && mm2!=9 && mm2!=10 && mm2!=12 && mm2!=14 && mm2!=5 && mm2!=6)
{
// For all assimilating rock types (<50), except those excluded one line above
if(mm2<50)
{
/* P, T parameters calc */
// Why need to do this every time again?
mpb=1e-5*allinterpomp(markx[mm1],marky[mm1],m1,m2);
mtk=markk[mm1];
/* Thermodynamic database use for Ro, Water */
/* Compute TD variables */
tdbasecalcomp(markx[mm1],marky[mm1],mtk,mpb,mm2,mm1,m1,&Mgg,&Mro,&Mwa,&Mcp,&Mbb,&Maa,&Mdhh,&Mkt);
mwa=Mwa;
mro=Mro;
/* Water change */
dmwa=mwa-markw[mm1];
/* Add water changes to the current cell, kg/m3 */
/* Water consuming */
if(dmwa>0)
{
if (wa1[m3]<=wa0[m3])
{
/* Save complete new water content */
markw[mm1]=mwa;
}
else
{
/* Compute and save partial new water content */
markw[mm1]=markw[mm1]+dmwa*wa0[m3]/wa1[m3];
}
}
}
// For all fluid markers: 50-100
else
/* Fluid marker change */
{
// Evaluate whether all free water is consumed
if(wa1[m3]<wa0[m3])
{
/* Count water changes for fluid marker */
markw[mm1]*=1.0-wa1[m3]/wa0[m3];
}
else
/* Erase fluid marker */
{
markx[mm1]=-1.0;
markk[mm1]=0;
}
}
}
}
/* Reset additional markers */
fprintf(fp_log,"\n WATER BEG Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1); fflush(fp_log);
mm1=0;
while(marknum1>marknum && mm1<marknum)
{
/* Reload marker */
if((markx[mm1]<0 || marky[mm1]<0 || (markx[mm1])>xsize || (marky[mm1])>ysize) && markt[mm1]<100)
{
/* Decrease additional markers counter */
marknum1--;
if(markx[marknum1]>=0)
{
/* Type save */
markt[mm1]=markt[marknum1];
/* X,Y, water reload */
markx[mm1]=markx[marknum1];
marky[mm1]=marky[marknum1];
markw[mm1]=markw[marknum1];
markd[mm1]=markd[marknum1];
markk[mm1]=markk[marknum1];
}
}
/* Increase markers counter */
mm1++;
}
fprintf(fp_log,"\n WATER END Number of markers: OLD = %ld NEW = %ld \n",marknum,marknum1); fflush(fp_log);
/* Set new marker number */
marknum=marknum1;
return 0;
}
/* End OMP Hydration front progress after H2O budget */
/* Erosion Surface progress */
void erosion()
{
/* Val buffer */
double v0,v1,dydx,x1,vx1,vy1,dy;
double ertimesum,ertimesum0;
long int m1,m2;
/**/
/* Erosion Solution Cycle ------------------------------------------ */
ertimesum=0;
ertimesum0=timestep;
do
{
/* Save old cycle results */
for (m1=0;m1<xnumx;m1++)
{
ep0[m1]=ep[m1];
ep0[xnumx+m1]=ep[xnumx+m1];
}
/**/
/**/
/**/
/* Initial timestep definition */
timestep=ertimesum0-ertimesum;
/**/
/**/
/**/
/* Erosion timestep definition using material velocity field */
for (m1=0;m1<xnumx;m1++)
{
/* Calc horizontal Coordinate */
x1=gx[m1];
/**/
/* EROSION SURFACE */
/* Calc material velocity on the Surface using velocity field */
allinteri(x1,ep0[m1]);
vx1=eps[11];
vy1=eps[12];
/* Check horizontal timestep */
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
timestep=MINV(timestep,(gx[m1]-gx[m1-1])/vx1);
/*
fprintf(fp_log,"111 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1-1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
timestep=MINV(timestep,(gx[m1]-gx[m1+1])/vx1);
/*
fprintf(fp_log,"222 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1+1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
/* Check vertical timestep */
if(vy1)
{
/* Horizontal line num definition */
m2=m2serch(ep0[m1]);
/* Check timestep */
timestep=MINV(timestep,(gy[m2+1]-gy[m2])/ABSV(vy1));
/*
fprintf(fp_log,"333 %ld %e %e %e %e %e %e %e",m2,vx1,vy1,(gy[m2+1]-gy[m2])/ABSV(vy1),ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
/**/
/**/
/* INITIAL SURFACE */
/* Calc material velocity on the Initial Surface using velocity field */
allinteri(x1,ep0[xnumx+m1]);
vx1=eps[11];
vy1=eps[12];
/* Check horizontal timestep */
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
timestep=MINV(timestep,(gx[m1]-gx[m1-1])/vx1);
/*
fprintf(fp_log,"444 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1-1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
timestep=MINV(timestep,(gx[m1]-gx[m1+1])/vx1);
/*
fprintf(fp_log,"555 %ld %e %e %e %e %e %e %e",m1,vx1,vy1,(gx[m1]-gx[m1+1])/vx1,ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
/* Check vertical timestep */
if(vy1)
{
/* Horizontal line num definition */
m2=m2serch(ep0[xnumx+m1]);
/* Check timestep */
timestep=MINV(timestep,(gy[m2+1]-gy[m2])/ABSV(vy1));
/*
fprintf(fp_log,"666 %ld %e %e %e %e %e %e %e",m2,vx1,vy1,(gy[m2+1]-gy[m2])/ABSV(vy1),ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
}
}
/*
fprintf(fp_log,"777 %e %e %e %e",ertimesum0,ertimesum,ertimesum0-ertimesum,timestep);getchar();
*/
/**/
/**/
/**/
/* Displace Surface boundary */
/*
for (m1=1;m1<xnumx-1;m1++)
*/
for (m1=0;m1<xnumx;m1++)
{
/* EROSION SURFACE */
/* Calculation of erosion rate */
v0=0;
if(ep0[m1]<eroslev)
{
v0=eroscon+eroskoe*(eroslev-ep0[m1]);
}
/* Calculation of sedimentation rate */
v1=0;
if(ep0[m1]>sedilev)
{
v1=sedicon+sedikoe*(ep0[m1]-sedilev);
}
/* Calc horizontal Coordinate */
x1=gx[m1];
/**/
/* Calc material velocity on the Surface using velocity field */
allinteri(x1,ep0[m1]);
vx1=eps[11];
vy1=eps[12];
/**/
/* Erase erosion/sedimentation rate for marginal points */
if((m1==0 && vx1>0) || (m1==xnumx-1 && vx1<0)) v0=v1=0;
/**/
/* Calc x derivative of y position of the Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
dydx=(ep0[m1]-ep0[m1-1])/(gx[m1]-gx[m1-1]);
/*
fprintf(fp_log,"AAA %e %e",ep0[m1],dydx);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
dydx=(ep0[m1+1]-ep0[m1])/(gx[m1+1]-gx[m1]);
/*
fprintf(fp_log,"BBB %e %e",ep0[m1],dydx);getchar();
*/
}
/* Recalc new Surface position */
ep[m1]+=timestep*(v0-v1+vy1-dydx*vx1);
/*
fprintf(fp_log,"SURFACE %ld %e %e %e %e %e %e %e %e",m1,x1,v0,v1,vx1,vy1,dydx,ep[m1]);getchar();
*/
/**/
/**/
/**/
/* INITIAL SURFACE */
/* Initial surface displacement */
/* Calc material velocity on the Surface using velocity field */
allinteri(x1,ep0[xnumx+m1]);
vx1=eps[11];
vy1=eps[12];
/* Calc x derivative of y position of Initial Surface using upwind differences */
dydx=0;
if(vx1>0 && m1>0)
{
dydx=(ep0[xnumx+m1]-ep0[xnumx+m1-1])/(gx[m1]-gx[m1-1]);
/*
fprintf(fp_log,"AAA %e ",dydx);getchar();
fprintf(fp_log,"AAA %e ",dydx);getchar();
*/
}
if(vx1<0 && m1<xnumx-1)
{
dydx=(ep0[xnumx+m1+1]-ep0[xnumx+m1])/(gx[m1+1]-gx[m1]);
/*
fprintf(fp_log,"BBB %e ",dydx);getchar();
*/
}
/* Recalc new Initial Surface position */
ep[xnumx+m1]+=timestep*(vy1-dydx*vx1);
/**/
}
/**/
/**/
/**/
/**/
/* Relax EROSION surface */
if (0==0)
for (m1=0;m1<xnumx-1;m1++)
{
/* Calc x derivative of y position */
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
/* Relax surface for critical slope */
if(dydx>slopemax)
{
dy=((ep[m1+1]-ep[m1])-slopemax*(gx[m1+1]-gx[m1]))/2.0;
ep[m1] +=dy;
ep[m1+1]-=dy;
/*
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
fprintf(fp_log,"AAA %ld %e %e",m1,slopemax,dydx);getchar();
*/
}
if(dydx<-slopemax)
{
dy=((ep[m1+1]-ep[m1])+slopemax*(gx[m1+1]-gx[m1]))/2.0;
ep[m1] +=dy;
ep[m1+1]-=dy;
/*
dydx=(ep[m1+1]-ep[m1])/(gx[m1+1]-gx[m1]);
fprintf(fp_log,"BBB %ld %e %e",m1,slopemax,dydx);getchar();
*/
}
}
/**/
/**/
/**/
/* Add Erosion step */
ertimesum+=timestep;
/**/
/**/
/**/
/* Print Results */
if (printmod) { fprintf(fp_log,"\n EROSION STEP = %e YEARS EROSION TIME = %e YEARS \n",timestep/3.15576e+7,ertimesum/3.15576e+7); fflush(fp_log); }
}
while(ertimesum<ertimesum0);
/* Restore timestep */
timestep=ertimesum0;
}
/* Erosion Surface progress */
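/* Added note: erosion() integrates the kinematic free-surface equation
dys/dt = vy - vx*dys/dx + v0(erosion) - v1(sedimentation)
with a first-order upwind dys/dx and an explicit step limited by the local CFL
checks above; y is positive downward, so erosion increases ep[] (lowers topography
above eroslev) and sedimentation decreases ep[] (fills lows below sedilev) */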
/* Thermodynamic database use for ro, Cp */
// Within a loop over all markers, do:
// Interpolation properties between four nearest points in thermodynamic database dep. on T,P,composition
void tdbasecalcomp(double x, double y, double mtk, double mpb, int mm2, long int mm1, long int m10, double *Mgg, double *Mro, double *Mwa, double *Mcp, double *Mbb, double *Maa, double *Mdhh, double *Mkt)
{
/* TD Database variables, dTK,dPB - TK, PB step for tabulation in TD database */
double H0,H1,H2,H3,R0,R1,R2,R3,G0,G1,G2,G3,W0,W1,W2,W3,n,e;
/* Val Buffers */
int n1,n2,mm3,ynpb;
double mhh0,mhh1,mdhh,maa,mwa,dmwa,wro,mro,mcp,mbb,mgg,mkt,mkt1,pbmax,xold,kr01,kr1,kr10,xkr,krad;
long int m1=m10;
double sy1,e1;
/* Maximal pressure for the shallow database */
pbmax=pbmin+pbstp*(double)(pbnum-1);
/* Adiabat computing (disabled 1==0 ramp-up of pressure at model start) */
ynpb=0; if(1==0 && timesum<3.15576e+7*1e+3) {fprintf(fp_log,"in adiabat ramp-up branch: should not be reached \n"); fflush(fp_log); mpb*=timesum/(3.15576e+7*1e+3); ynpb=1;}
/* Reset TD variables */
*Mgg=*Mro=*Mwa=*Mcp=*Mbb=*Maa=0;
/* Thermal conductivity */
/* m895 Dry peridotite Fe=12 */
/* Olivine: Hoffmeister, 1999; Hoffmeister & Yuen, 2005 */
if(mpb<235000.0)
{
/* Lattice k */
mkt1=(1.878+770.9/MINV(mtk,1200.0))*(1.0+4.26e-6*mpb);
/* Radiative k 0.1 mm */
kr01=pow(mtk/4000.0,3.0);
/* Radiative k 1 mm */
kr1=pow(mtk/1774.0,3.0);
/* Radiative k 10 mm */
xkr=pow(mtk/1636.0,10.0);
xkr/=xkr+1.0; kr10=pow((mtk-1000.0*xkr)/1011.0,3.0)-0.7713*xkr;
}
/* Perovskite: Hoffmeister, 1999; Hoffmeister & Yuen, 2005 */
else
{
/* Lattice k */
mkt1=(1.291+1157.0/MINV(mtk,2100.0))*(1.0+2.50e-6*mpb);
/* Radiative k 0.1 mm */
kr01=pow(mtk/3591.0,3.0);
/* Radiative k 1 mm */
kr1=pow(mtk/2117.0,3.0);
/* Radiative k 10 mm */
xkr=pow(mtk/1500.0,4.0); xkr/=xkr+1.0;
kr10=pow((mtk+4000.0*xkr)/5776.0,3.0)+2.822*xkr;
}
krad=kr1;
/* Shallow TD base type */
if(mpb<pbmax && ynpb==0)
{
/* TD base type */
switch (mm2)
{
/* Dry Upper crust */
case 5: mm3=11; break;
/* Wet Upper crust */
case 17: mm3=12; break;
/* Dry Lower crust */
case 6: mm3=13; break;
/* Wet Lower crust */
case 18: mm3=14; break;
/* Sediments */
case 2:
case 3:
case 4: mm3=5; break;
/* Molten Sediments */
case 37:
case 25:
case 22:
case 23:
case 24: mm3=6; break;
/* Basalt */
case 16:
case 7: mm3=7; break;
/* Molten Basalt */
case 36:
case 27: mm3=8; break;
/* Gabbro */
case 38:
case 26:
case 8: mm3=3; break;
/* Molten Gabbro */
case 28: mm3=4; break;
/* Dry peridotite */
case 9:
case 12:
case 14:
case 10: mm3=0; break;
/* Wet peridotite */
case 13:
case 11: mm3=1; break;
/* Molten peridotite */
case 34: mm3=2; break;
/* Unknown type */
default: {fprintf(fp_log,"Shallow TD: Unknown rock type for TD database %d, for marker %ld with T= %f, P=%f \n",mm2,mm1,mtk,mpb); fflush(fp_log); exit(0);}
}
/* ABCD-4Cell Number */
// Get weights for nearest points in thermodynamic database
e=(mtk-tkmin)/tkstp;
if(e<0) e=0;
if(e>(double)(tknum-1)) e=(double)(tknum-1);
n=(mpb-pbmin)/pbstp;
if(n<0) n=0;
if(n>(double)(pbnum-1)) n=(double)(pbnum-1);
n1=(int)(e);
if(n1>tknum-2) n1=tknum-2;
n2=(int)(n);
if(n2>pbnum-2) n2=pbnum-2;
/* e,n Calc */
e=(e-(double)(n1));
n=(n-(double)(n2));
/* Ro H values */
/* 0 2 */
/* 1 3 */
R0=td[n1 ][n2 ][mm3][0]*1000.0;
R1=td[n1 ][n2+1][mm3][0]*1000.0;
R2=td[n1+1][n2 ][mm3][0]*1000.0;
R3=td[n1+1][n2+1][mm3][0]*1000.0;
H0=td[n1 ][n2 ][mm3][1]*1000.0*4.1837;
H1=td[n1 ][n2+1][mm3][1]*1000.0*4.1837;
H2=td[n1+1][n2 ][mm3][1]*1000.0*4.1837;
H3=td[n1+1][n2+1][mm3][1]*1000.0*4.1837;
W0=td[n1 ][n2 ][mm3][4];
W1=td[n1 ][n2+1][mm3][4];
W2=td[n1+1][n2 ][mm3][4];
W3=td[n1+1][n2+1][mm3][4];
G0=td[n1 ][n2 ][mm3][3]*1000.0;G0*=G0*R0;
G1=td[n1 ][n2+1][mm3][3]*1000.0;G1*=G1*R1;
G2=td[n1+1][n2 ][mm3][3]*1000.0;G2*=G2*R2;
G3=td[n1+1][n2+1][mm3][3]*1000.0;G3*=G3*R3;
/* Shear modulus calc by interpolation */
mgg=((G0*(1.0-n)+G1*n)*(1.0-e)+(G2*(1.0-n)+G3*n)*e);
/* Ro calc by interpolation */
mro=((R0*(1.0-n)+R1*n)*(1.0-e)+(R2*(1.0-n)+R3*n)*e);
/* Water wt% calc by interpolation */
mwa=((W0*(1.0-n)+W1*n)*(1.0-e)+(W2*(1.0-n)+W3*n)*e);
/* Add pore fluid */
/* Erosion surface */
e1=(x-gx[m10])/(gx[m10+1]-gx[m10]);
sy1=y-(e1*ep[m10+1]+(1.0-e1)*ep[m10]);
if(marks0[mm2]>0 && sy1>0 && sy1<zmpor && mtk<tkpor)
{
dmwa=marks0[mm2]*(tkpor-mtk)/(tkpor-273.15)*(zmpor-sy1)/zmpor;
mwa+=dmwa;
wro=1050.0;
mro=mro/(1.0+dmwa*1e-2*(mro/wro-1.0));
}
/* Cp calc by interpolation */
mcp=((H2-H0)*(1.0-n)+(H3-H1)*n)/tkstp;
if(mcp<1e+2) mcp=1e+2; else if(mcp>5e+4) mcp=5e+4;
/* Effective adiabatic beta=1/V*dV/dT=ro/T*[-dH/dP+V] calc by interpolation */
mbb=(2.0/(R1+R0)-(H1-H0)/pbstp/1e+5)*(1.0-e)+(2.0/(R3+R2)-(H3-H2)/pbstp/1e+5)*e;
mbb*=mro/mtk;
if(mbb<-1e-2) mbb=-1e-2; else if(mbb>1e-2) mbb=1e-2;
/* Effective compressibility term alpha=1/ro*d(ro)/dP calc by interpolation */
maa=(2.0/(R1+R0)*(R1-R0)*(1.0-e)+2.0/(R3+R2)*(R3-R2)*e)/pbstp/1e+5;
if(maa<0) maa=0;
/* Activation enthalpy recalc using enthalpy changes */
/* Current Enthalpy */
mhh1=((H0*(1.0-n)+H1*n)*(1.0-e)+(H2*(1.0-n)+H3*n)*e);
/* Pmin Enthalpy */
mhh0=(td[n1][0 ][mm3][1]*(1.0-e) + td[n1+1][0 ][mm3][1]*e)*1000.0*4.1837;
/* Enthalpy Difference calc */
mdhh=(mhh1-mhh0);
/* Save TD variables */
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt+=krad;
}
/* Deep TD base type */
if(1==0 || mpb>0.75*pbmax || ynpb==1)
{
switch (mm2)
{
/* MORB DATABASE */
/* UPPER, LOWER Crust */
case 5:
case 6:
case 17:
case 18:
case 37:
case 38:
/* Sediments */
case 2:
case 3:
case 4:
/* Molten Sediments */
case 22:
case 23:
case 24:
/* Molten crust */
case 25:
case 26:
/* Basalt */
case 16:
case 7:
/* Molten Basalt */
case 36:
case 27:
/* Gabbro */
case 8:
/* Molten Gabbro */
case 28: mm3=10; break;
/**/
/* PIROLITE DATABASE */
/* Dry peridotite */
case 9:
case 12:
case 14:
case 10:
/* Wet peridotite */
case 13:
case 11:
/* Molten peridotite */
case 34: mm3=9; break;
// Added missing rock types
case 15:
case 19:
case 20:
case 21:
case 29:
case 30:
/* Unknown type */
default: {fprintf(fp_log,"Deep TD: Unknown rock type for TD database %d, for marker %ld with T= %f, P=%f \n",mm2,mm1,mtk,mpb); fflush(fp_log); exit(0);}
}
/* ABCD-4Cell Number */
e=(mtk-tkmin1)/tkstp1;
if(e<0) e=0;
if(e>(double)(tknum1-1)) e=(double)(tknum1-1);
n=(mpb-pbmin1)/pbstp1;
if(n<0) n=0;
if(n>(double)(pbnum1-1)) n=(double)(pbnum1-1);
n1=(int)(e);
if(n1>tknum1-2) n1=tknum1-2;
n2=(int)(n);
if(n2>pbnum1-2) n2=pbnum1-2;
/* e,n Calc */
e=(e-(double)(n1));
n=(n-(double)(n2));
/* Ro H values */
/* 0 2 */
/* 1 3 */
R0=td[n1 ][n2 ][mm3][0]*1000.0;
R1=td[n1 ][n2+1][mm3][0]*1000.0;
R2=td[n1+1][n2 ][mm3][0]*1000.0;
R3=td[n1+1][n2+1][mm3][0]*1000.0;
H0=td[n1 ][n2 ][mm3][1]*1000.0*4.1837;
H1=td[n1 ][n2+1][mm3][1]*1000.0*4.1837;
H2=td[n1+1][n2 ][mm3][1]*1000.0*4.1837;
H3=td[n1+1][n2+1][mm3][1]*1000.0*4.1837;
W0=td[n1 ][n2 ][mm3][4];
W1=td[n1 ][n2+1][mm3][4];
W2=td[n1+1][n2 ][mm3][4];
W3=td[n1+1][n2+1][mm3][4];
G0=td[n1 ][n2 ][mm3][3]*1000.0;G0*=G0*R0;
G1=td[n1 ][n2+1][mm3][3]*1000.0;G1*=G1*R1;
G2=td[n1+1][n2 ][mm3][3]*1000.0;G2*=G2*R2;
G3=td[n1+1][n2+1][mm3][3]*1000.0;G3*=G3*R3;
/* Shear modulus calc by interpolation */
mgg=((G0*(1.0-n)+G1*n)*(1.0-e)+(G2*(1.0-n)+G3*n)*e);
/* Ro calc by interpolation */
mro=((R0*(1.0-n)+R1*n)*(1.0-e)+(R2*(1.0-n)+R3*n)*e);
/* Water wt% calc by interpolation */
mwa=0;
/* Water in crystals */
if(mm2!=9 && mm2!=10 && mm2!=14 && mpb<235000.0)
{
dmwa=0.1;
mwa+=dmwa;
wro=1050.0;
mro=100.0/((100.0-dmwa)/mro+dmwa/wro);
}
/* Cp calc by interpolation */
mcp=((H2-H0)*(1.0-n)+(H3-H1)*n)/tkstp1;
if(mcp<1e+2) mcp=1e+2; else if(mcp>5e+4) mcp=5e+4;
/* Effective adiabatic beta=1/V*dV/dT=ro/T*[-dH/dP+V] calc by interpolation */
mbb=(2.0/(R1+R0)-(H1-H0)/pbstp1/1e+5)*(1.0-e)+(2.0/(R3+R2)-(H3-H2)/pbstp1/1e+5)*e;
mbb*=mro/mtk;
if(mbb<-1e-2) mbb=-1e-2; else if(mbb>1e-2) mbb=1e-2;
/* Effective compressibility term alpha=1/ro*d(ro)/dP calc by interpolation */
maa=(2.0/(R1+R0)*(R1-R0)*(1.0-e)+2.0/(R3+R2)*(R3-R2)*e)/pbstp1/1e+5;
if(maa<0) maa=0;
/* Activation enthalpy recalc using enthalpy changes */
/* Current Enthalpy */
mhh1=((H0*(1.0-n)+H1*n)*(1.0-e)+(H2*(1.0-n)+H3*n)*e);
/* Pmin Enthalpy */
mhh0=(td[n1][0 ][mm3][1]*(1.0-e) + td[n1+1][0 ][mm3][1]*e)*1000.0*4.1837;
/* Enthalpy Difference calc */
mdhh=(mhh1-mhh0);
/* Thermal conductivity */
mkt=mkt1+krad;
/* Computing transitional parameters */
if(1==0 || mpb>pbmax || ynpb==1)
// Manny has 1==1
{
/* Save TD variables */
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt=mkt;
}
else
{
xold=(pbmax-mpb)/(0.25*pbmax);
/* Save TD variables */
// Second column comes from shallow database assignment above, but I never reach into this deep one !
mgg=mgg*(1.0-xold)+ *Mgg *xold;
mro=mro*(1.0-xold)+ *Mro *xold;
mwa=mwa*(1.0-xold)+ *Mwa *xold;
mcp=mcp*(1.0-xold)+ *Mcp *xold;
mbb=mbb*(1.0-xold)+ *Mbb *xold;
maa=maa*(1.0-xold)+ *Maa *xold;
mdhh=mdhh*(1.0-xold)+ *Mdhh *xold;
mkt=mkt*(1.0-xold)+ *Mkt *xold;
*Mgg=mgg;
*Mro=mro;
*Mwa=mwa;
*Mcp=mcp;
*Mbb=mbb;
*Maa=maa;
*Mdhh=mdhh;
*Mkt=mkt;
}
}
}
/* End OMP Thermodynamic database use for ro, Cp */
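/* Added illustrative sketch (not called by the model): the 4-point bilinear
interpolation pattern used in tdbasecalcomp() above and the allinter*() routines
below, with e,n in [0,1] the normalized distances from node 0 of the cell
(node layout 0 2 / 1 3 as in the comments above). */
static double bilinear_sketch(double v0, double v1, double v2, double v3, double e, double n)
{
return (v0*(1.0-n)+v1*n)*(1.0-e)+(v2*(1.0-n)+v3*n)*e;
}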
// *** Interpolation routines using the following nodal locations ***
/* Staggered Nodes num */
/* [0] [3] [6] */
/* T0,xy0 Vy0 T3,xy3 Vy3 */
/* */
/* Vx0 P4,xx4,yy4 Vx3 P7,xx7,yy7 */
/* */
/* [1] [4] [7] */
/* T,xy1 Vy1 T4,xy4 Vy4 */
/* */
/* Vx1 P5,xx5,yy5 Vx4 P8,xx8,yy8 */
/* */
/* [2] [5] [8] */
/* */
/* */
/* Weights for horizontal and vertical nodes calculation for marker interpolation */
void nodewt(long int m1min, long int m1max, long int m2min, long int m2max, double x, double y, int ynx, int yny)
/* m1min,m1max, m2min,m2max - node X,Y number limits */
/* x,y - current point coordinates */
/* ynx, yny - Type of shifts: No(0), Back(-1), Forw(1) */
{
/* Eyy vertical position */
long int m3;
int nx,ny;
/* Weights in horizontal directions */
/* Load distances to xn[] */
if(ynx<0)
{
for (m3=m1min;m3<=m1max;m3++)
{
xn[m3-m1min]=(gx[m3]+gx[m3-1])/2.0;
}
}
if(ynx==0)
{
for (m3=m1min;m3<=m1max;m3++)
{
xn[m3-m1min]=gx[m3];
}
}
if(ynx>0)
{
for (m3=m1min;m3<=m1max;m3++)
{
xn[m3-m1min]=(gx[m3]+gx[m3+1])/2.0;
}
}
/* Calc maximal position in xn[] */
nx=(int)(m1max-m1min);
/* Calc coefficients for horizontal direction */
fdweight(nx,0,x);
/**/
/* Reload horizontal coefficients to cn[] */
for (m3=0;m3<=nx;m3++)
{
cn[m3][1]=cn[m3][0];
}
/* Weights in vertical directions */
/* Load distances to xn[] */
if(yny<0)
{
for (m3=m2min;m3<=m2max;m3++)
{
xn[m3-m2min]=(gy[m3]+gy[m3-1])/2.0;
}
}
if(yny==0)
{
for (m3=m2min;m3<=m2max;m3++)
{
xn[m3-m2min]=gy[m3];
}
}
if(yny>0)
{
for (m3=m2min;m3<=m2max;m3++)
{
xn[m3-m2min]=(gy[m3]+gy[m3+1])/2.0;
}
}
/* Calc maximal position in xn[] */
ny=(int)(m2max-m2min);
/* Calc coefficients for vertical direction */
fdweight(ny,0,y);
}
/* End Weights for horizontal and vertical nodes calculation for marker interpolation */
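// Added note: nodewt() only loads the relevant nodal coordinates into xn[] and
// delegates to fdweight(n,0,x), which (defined elsewhere in this file) fills
// cn[][0] with the 1-D interpolation weights for the point x; the horizontal
// weights are then parked in cn[][1] so the vertical call can reuse cn[][0]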
/* Calculation of EE,VX,VY,ESP, and PR by Interpolation */
void allinteriomp(double x, double y, long int m10, long int m20, double *VX, double *VY, double *PR, double *ESP, double *EE)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3;
/* en-NormalisedDistance */
// Keep EXX and EXY local here, so only calculates EE
double e,n,ival,xrat,EXX,EXY;
/**/
/**/
/* Check X,Y */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Check weighting for interpolation */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/**/
// Store for more usage throughout subroutine
m1=m10;
m2=m20;
/**/
/**/
/**/
/* EXY, ESP interpolation ------------------------ */
// Clear buffer
*ESP=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<1) m10=1; if(m10>xnumx-3) m10=xnumx-3;
if(m20<1) m20=1; if(m20>ynumy-3) m20=ynumy-3;
/**/
/* Calc normalized distances */
// Note that the nodal distance is now fixed, while before could change it with intermod. If want that again see old scripts in dynwif/CleanOldRun..
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* EXY, ESP bilinear interpolation ------------------------ */
m3=m10*ynumy+m20;
EXY=(1.0-e)*(1.0-n)*exy[m3]+(1.0-e)*n*exy[m3+1]+e*(1.0-n)*exy[m3+ynumy]+e*n*exy[m3+ynumy+1];
*ESP=(1.0-e)*(1.0-n)*esp[m3]+(1.0-e)*n*esp[m3+1]+e*(1.0-n)*esp[m3+ynumy]+e*n*esp[m3+ynumy+1];
/* End EXY, ESP interpolation ------------------------ */
/**/
/**/
/**/
/* Exx, P interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
*EE=0;
*PR=0;
*VX=0;
*VY=0;
/* Horizontal,Vertical limits for interpolation calc */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1; if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* Interpolation ------------------------ */
m3=m10*ynumy+m20;
EXX=(1.0-e)*(1.0-n)*exx[m3]+(1.0-e)*n*exx[m3+1]+e*(1.0-n)*exx[m3+ynumy]+e*n*exx[m3+ynumy+1];
// QUESTION TARAS: why interpolate pressure here when it is already done in interp or d? I ported it back for now, so remove it if it is not needed ...
// More generally, some variables seem to be interpolated needlessly; could you go over these routines and remove what is not really needed, to speed the code up?
*PR=(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
// Include small weight (xrat) from farther away nodes for velocities
*VX=( (1.0-e)*(1.0-n)*(vx[m3-1]+vx[m3-ynumy-1])+(1.0-e)*n*(vx[m3]+vx[m3-ynumy]) +e*(1.0-n)*(vx[m3+ynumy-1]+vx[m3-1])+e*n*(vx[m3+ynumy]+vx[m3]) ) * 0.5*(1.0-xrat);
*VY=( (1.0-e)*(1.0-n)*(vy[m3-ynumy]+vy[m3-ynumy-1])+(1.0-e)*n*(vy[m3-ynumy+1]+vy[m3-ynumy]) +e*(1.0-n)*(vy[m3]+vy[m3-1])+e*n*(vy[m3+1]+vy[m3]) ) * 0.5*(1.0-xrat);
//eps[11]+=ival*(vx[m3-1]+vx[m3-ynumy-1])*0.5*(1.0-xrat); QUESTION TARAS Why use nodes above and left above here ??
//eps[12]+=ival*(vy[m3-ynumy]+vy[m3-ynumy-1])*0.5*(1.0-xrat);
// Calculate second invariant
*EE=pow(EXX*EXX+EXY*EXY,0.5);
/* End Exx, P, Vx, Vy cell interpolation ------------------------ */
/* Vx interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(y<(gy[m20]+gy[m20+1])/2.0) m20-=1;
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-(gy[m20]+gy[m20+1])/2.0)/((gy[m20+2]-gy[m20])/2.0);
/* Vx interpolation ------------------------ */
m3=m10*ynumy+m20;
*VX+=((1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1])*xrat;
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(x<(gx[m10]+gx[m10+1])/2.0) m10-=1;
if(m10<0) m10=0; if(m10>xnumx-3) m10=xnumx-3;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10]+gx[m10+1])/2.0)/((gx[m10+2]-gx[m10])/2.0);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vy interpolation ------------------------ */
m3=m10*ynumy+m20;
*VY+=((1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1])*xrat;
/* End Vy interpolation ------------------------ */
/**/
/**/
/**/
}
/* OMP Interpolate Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy from surrounding nodes to marker at x,y */
/* OMP Calculation of T,T0 for current location by Interpolation */
void allintertomp(double x, double y, long int m10, long int m20, double *TK, double *TK2)
/* x,y - XY location of point for Vx,Vy calc */
/* m10, m20 - Upper left node */
// TK, TK2 - interpolated temperatures returned for the marker
{
/* Counters */
long int m3;
/* en-NormalizedDistance */
double e,n;
/* Check X,Y */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/* T interpolation ------------------------ */
/* Buffer clear */
*TK=*TK2=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* T interpolation ------------------------ */
m3=m10*ynumy+m20;
*TK=(1.0-e)*(1.0-n)*tk[m3]+(1.0-e)*n*tk[m3+1]+e*(1.0-n)*tk[m3+ynumy]+e*n*tk[m3+ynumy+1];
*TK2=(1.0-e)*(1.0-n)*tk2[m3]+(1.0-e)*n*tk2[m3+1]+e*(1.0-n)*tk2[m3+ynumy]+e*n*tk2[m3+ynumy+1];
/* End T interpolation ------------------------ */
}
/* OMP Calculation of T,T0 for current location by Interpolation */
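/* Illustrative sketch (not part of the original solver): the bilinear
   stencil used by all of the routines above, written as a standalone
   helper. Assumes monotonically increasing coordinate arrays g_x[], g_y[]
   and a field f[] stored with column stride ny, as in the code above;
   (mx,my) is the upper-left node of the cell containing (x,y). */
static double bilinear_sketch(const double *g_x, const double *g_y,
                              const double *f, long ny,
                              long mx, long my, double x, double y)
{
    /* Normalized distances e,n inside the cell */
    double e = (x - g_x[mx]) / (g_x[mx + 1] - g_x[mx]);
    double n = (y - g_y[my]) / (g_y[my + 1] - g_y[my]);
    long m3 = mx * ny + my; /* index of the upper-left node */
    /* Weighted sum over the four corner nodes */
    return (1.0 - e) * (1.0 - n) * f[m3]
         + (1.0 - e) * n         * f[m3 + 1]
         + e         * (1.0 - n) * f[m3 + ny]
         + e         * n         * f[m3 + ny + 1];
}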
/* OMP Calculation of P by Interpolation */
double allinterpomp(double x, double y, long int m10, long int m20)
/* x,y - XY location of point for Vx,Vy calc */
/* m10, m20 - Upper left node */
{
/* Counters */
long int m3;
/* en-Normalized distance */
double ival,e,n;
/* Check X,Y */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/* Buffer clear */
ival=0;
/* Horizontal,Vertical limits for interpolation calc */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1; if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* P interpolation ------------------------ */
m3=m10*ynumy+m20;
ival=(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
/* Return pressure */
return ival;
/*
fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar();
if(timestep){fprintf(fp_log,"P1 %e %e %ld %ld %e %e %e",x,y,m10,m20,e,n,ival);getchar();}
*/
}
/* OMP End calculation of P by Interpolation */
/* Calculation of SIGij by Interpolation */
void allinterdomp(double x, double y,long int m10, long int m20, double *TK,double *EXY,double *EXYE,double *SXY,double *SXYE,double *EXX,double *SXX,double *PR,double *SXXE,double *SPPE,double *EXXE,double *VX, double *MVX, double *VY, double *MVY)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3;
/* en-NormalisedDistance */
double e,n,xrat;
/**/
/**/
/* Check X,Y */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Store Up Left Node X,Y Num for later re-usage */
m1=m10;
m2=m20;
/**/
/**/
/* Check weighting for interpolation */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/* T interpolation ------------------------ */
/* Buffer clear */
*TK=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* T interpolation ------------------------ */
m3=m10*ynumy+m20;
*TK=(1.0-e)*(1.0-n)*tk[m3]+(1.0-e)*n*tk[m3+1]+e*(1.0-n)*tk[m3+ynumy]+e*n*tk[m3+ynumy+1];
/**/
/* End T interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
*EXY=*EXYE=*SXY=*SXYE=0;
/* Horizontal,Vertical limits for interpolation calc */
if(m10<1) m10=1; if(m10>xnumx-3) m10=xnumx-3;
if(m20<1) m20=1; if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/**/
/* EPSxy Interpolate after interpolation weights */
m3=m10*ynumy+m20;
*EXY=(1.0-e)*(1.0-n)*exy[m3]+(1.0-e)*n*exy[m3+1]+e*(1.0-n)*exy[m3+ynumy]+e*n*exy[m3+ynumy+1];
*EXYE=(1.0-e)*(1.0-n)*exye[m3]+(1.0-e)*n*exye[m3+1]+e*(1.0-n)*exye[m3+ynumy]+e*n*exye[m3+ynumy+1];
*SXY=(1.0-e)*(1.0-n)*sxy[m3]+(1.0-e)*n*sxy[m3+1]+e*(1.0-n)*sxy[m3+ynumy]+e*n*sxy[m3+ynumy+1];
*SXYE=(1.0-e)*(1.0-n)*sxye[m3]+(1.0-e)*n*sxye[m3+1]+e*(1.0-n)*sxye[m3+ynumy]+e*n*sxye[m3+ynumy+1];
/* End SIGxy interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxx,SIGyy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
*EXX=*SXX=*PR=*SXXE=*SPPE=*EXXE=0;
*VX=*MVX=0;
*VY=*MVY=0;
/* Horizontal,Vertical limits for interpolation calc */
if(x>(gx[m10]+gx[m10+1])/2.0) m10++;
if(y>(gy[m20]+gy[m20+1])/2.0) m20++;
if(m10<1) m10=1; if(m10>xnumx-2) m10=xnumx-2;
if(m20<1) m20=1; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10-1]+gx[m10])/2.0)/((gx[m10+1]-gx[m10-1])/2.0);
n=(y-(gy[m20-1]+gy[m20])/2.0)/((gy[m20+1]-gy[m20-1])/2.0);
/* SIGxx,SIGyy,P interpolation ------------------------ */
m3=m10*ynumy+m20;
*EXX =(1.0-e)*(1.0-n)*exx[m3]+(1.0-e)*n*exx[m3+1]+e*(1.0-n)*exx[m3+ynumy]+e*n*exx[m3+ynumy+1];
*SXX =(1.0-e)*(1.0-n)*sxx[m3]+(1.0-e)*n*sxx[m3+1]+e*(1.0-n)*sxx[m3+ynumy]+e*n*sxx[m3+ynumy+1];
*PR =(1.0-e)*(1.0-n)*pr[m3]+(1.0-e)*n*pr[m3+1]+e*(1.0-n)*pr[m3+ynumy]+e*n*pr[m3+ynumy+1];
*SPPE=(1.0-e)*(1.0-n)*sppe[m3]+(1.0-e)*n*sppe[m3+1]+e*(1.0-n)*sppe[m3+ynumy]+e*n*sppe[m3+ynumy+1];
*SXXE=(1.0-e)*(1.0-n)*sxxe[m3]+(1.0-e)*n*sxxe[m3+1]+e*(1.0-n)*sxxe[m3+ynumy]+e*n*sxxe[m3+ynumy+1];
*EXXE=(1.0-e)*(1.0-n)*exxe[m3]+(1.0-e)*n*exxe[m3+1]+e*(1.0-n)*exxe[m3+ynumy]+e*n*exxe[m3+ynumy+1];
*VX=( (1.0-e)*(1.0-n)*(vx[m3-1]+vx[m3-ynumy-1])+(1.0-e)*n*(vx[m3]+vx[m3-ynumy]) +e*(1.0-n)*(vx[m3+ynumy-1]+vx[m3-1])+e*n*(vx[m3+ynumy]+vx[m3]) )*0.5 *(1.0-xrat);
*VY=( (1.0-e)*(1.0-n)*(vy[m3-ynumy]+vy[m3-ynumy-1])+(1.0-e)*n*(vy[m3-ynumy+1]+vy[m3-ynumy]) +e*(1.0-n)*(vy[m3]+vy[m3-1])+e*n*(vy[m3+1]+vy[m3]) )*0.5 *(1.0-xrat);
*MVX=( (1.0-e)*(1.0-n)*(mvx[m3-1]+mvx[m3-ynumy-1])+(1.0-e)*n*(mvx[m3]+mvx[m3-ynumy]) +e*(1.0-n)*(mvx[m3+ynumy-1]+mvx[m3-1])+e*n*(mvx[m3+ynumy]+mvx[m3]) )*0.5 *(1.0-xrat);
*MVY=( (1.0-e)*(1.0-n)*(mvy[m3-ynumy]+mvy[m3-ynumy-1])+(1.0-e)*n*(mvy[m3-ynumy+1]+mvy[m3-ynumy]) +e*(1.0-n)*(mvy[m3]+mvy[m3-1])+e*n*(mvy[m3+1]+mvy[m3]) )*0.5 *(1.0-xrat);
/* End SIGxx,SIGyy interpolation ------------------------ */
/**/
/**/
/**/
/* Vx interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(y<(gy[m20]+gy[m20+1])/2.0) m20-=1;
if(m10<0) m10=0; if(m10>xnumx-2) m10=xnumx-2;
if(m20<0) m20=0; if(m20>ynumy-3) m20=ynumy-3;
/* Calc normalized distances */
e=(x-gx[m10])/(gx[m10+1]-gx[m10]);
n=(y-(gy[m20]+gy[m20+1])/2.0)/((gy[m20+2]-gy[m20])/2.0);
/* Vx interpolation ------------------------ */
m3=m10*ynumy+m20;
//*VX=(1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1];
//*MVX=(1.0-e)*(1.0-n)*mvx[m3]+(1.0-e)*n*mvx[m3+1]+e*(1.0-n)*mvx[m3+ynumy]+e*n*mvx[m3+ynumy+1];
// Include small weight (xrat) from farther away nodes for velocities
*VX+=((1.0-e)*(1.0-n)*vx[m3]+(1.0-e)*n*vx[m3+1]+e*(1.0-n)*vx[m3+ynumy]+e*n*vx[m3+ynumy+1]) *xrat;
*MVX+=((1.0-e)*(1.0-n)*mvx[m3]+(1.0-e)*n*mvx[m3+1]+e*(1.0-n)*mvx[m3+ynumy]+e*n*mvx[m3+ynumy+1]) *xrat;
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation ------------------------ */
// Reset and clear buffer
m10=m1;
m20=m2;
/* Horizontal,Vertical limits for interpolation calc */
if(x<(gx[m10]+gx[m10+1])/2.0) m10-=1;
if(m10<0) m10=0; if(m10>xnumx-3) m10=xnumx-3;
if(m20<0) m20=0; if(m20>ynumy-2) m20=ynumy-2;
/* Calc normalized distances */
e=(x-(gx[m10]+gx[m10+1])/2.0)/((gx[m10+2]-gx[m10])/2.0);
n=(y-gy[m20])/(gy[m20+1]-gy[m20]);
/* Vy interpolation ------------------------ */
m3=m10*ynumy+m20;
//*VY=(1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1];
//*MVY=(1.0-e)*(1.0-n)*mvy[m3]+(1.0-e)*n*mvy[m3+1]+e*(1.0-n)*mvy[m3+ynumy]+e*n*mvy[m3+ynumy+1];
// Include small weight (xrat) from farther away nodes for velocities
*VY+=((1.0-e)*(1.0-n)*vy[m3]+(1.0-e)*n*vy[m3+1]+e*(1.0-n)*vy[m3+ynumy]+e*n*vy[m3+ynumy+1]) *xrat;
*MVY+=((1.0-e)*(1.0-n)*mvy[m3]+(1.0-e)*n*mvy[m3+1]+e*(1.0-n)*mvy[m3+ynumy]+e*n*mvy[m3+ynumy+1]) *xrat;
/* End Vy interpolation ------------------------ */
/*
fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar();
*/
}
/* Calculation of SIGij by Interpolation */
/* Calculation of Vx,Vy, EPSxx*SIGxx,EPSyy*SIGyy,EPSxy*SIGxy by Interpolation */
// Not adapted for parallelization
void allinters(double x, double y)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3,m10,m20,m1min,m1max,m2min,m2max;
/* en-NormalisedDistance */
double ival;
/**/
/**/
/* Check X,Y */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Up Left Node X,Y Num */
wn[0]=m10=m1serch(x);
wn[1]=m20=m2serch(y);
/**/
/**/
/**/
/* SIGxy*EPSxy interpolation ------------------------ */
/* Buffer clear */
eps[13]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(m1min<1) m1min=1; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(m2min<1) m2min=1; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,0);
/**/
/* SIGxy,EPSxy Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[13]+=ival*sxy[m3]*sxy[m3]/(2.0*nu[m3]);
}
/* End SIGxy*EPSxy interpolation ------------------------ */
/**/
/**/
/**/
/* SIGxx*EPSxx, SIGyy*EPSyy interpolation ------------------------ */
/* Buffer clear */
eps[14]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x>(gx[m10]+gx[m10+1])/2.0) m1min+=1;
if(m1min<1) m1min=1; if(m1min>xnumx-2) m1min=xnumx-2;
m1max=m1min+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(y>(gy[m20]+gy[m20+1])/2.0) m2min+=1;
if(m2min<1) m2min=1; if(m2min>ynumy-2) m2min=ynumy-2;
m2max=m2min+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,-1,-1);
/**/
/* SIGxx,EPSxx,SIGyy,EPSyy,P Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[14]+=ival*sxx[m3]*sxx[m3]/(2.0*nd[m3]);
}
/* End SIGxx*EPSxx,SIGyy*EPSyy interpolation ------------------------ */
/**/
/**/
/**/
/* Vx interpolation ------------------------ */
/* Buffer clear */
eps[11]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10-intermod; if(m1min<0) m1min=0;
m1max=m10+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
/**/
m2min=m20; if(y<(gy[m20]+gy[m20+1])/2.0) m2min-=1;
if(m2min<0) m2min=0; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<0) m2min=0;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,+1);
/**/
/* Vx Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[11]+=ival*vx[m3];
}
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation ------------------------ */
/* Buffer clear */
eps[12]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x<(gx[m10]+gx[m10+1])/2.0) m1min-=1;
if(m1min<0) m1min=0; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<0) m1min=0;
/**/
m2min=m20-intermod; if(m2min<0) m2min=0;
m2max=m20+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,+1,0);
/**/
/* Vy Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[12]+=ival*vy[m3];
}
/* End Vy interpolation ------------------------ */
/*
fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar();
*/
}
/* Calculation of Vx,Vy, EPSxx*SIGxx,EPSyy*SIGyy,EPSxy*SIGxy by Interpolation */
/* Calculation of Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy by Interpolation */
// Not adapted for parallelization
void allinteri(double x, double y)
/* x,y - XY location of point for Vx,Vy calc */
{
/* Counters */
long int m1,m2,m3,m10,m20,m1min,m1max,m2min,m2max;
/* en-NormalisedDistance */
double ival,xrat;
/**/
/**/
/* Check X,Y */
if(x<0) x=0; else if(x>xsize) x=xsize;
if(y<0) y=0; else if(y>ysize) y=ysize;
/**/
/**/
/**/
/* Check weighting for interpolation */
xrat=2.0/3.0;
if(x<(gx[0]+gx[1])/2.0) xrat=1.0;
if(x>(gx[xnumx-2]+gx[xnumx-1])/2.0) xrat=1.0;
if(y<(gy[0]+gy[1])/2.0) xrat=1.0;
if(y>(gy[ynumy-2]+gy[ynumy-1])/2.0) xrat=1.0;
/**/
/**/
/**/
/* Up Left Node X,Y Num */
wn[0]=m10=m1serch(x);
wn[1]=m20=m2serch(y);
/**/
/**/
/**/
/* EPSxy, SPINxy interpolation ------------------------ */
/* Buffer clear */
eps[4]=eps[30]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(m1min<1) m1min=1; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(m2min<1) m2min=1; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,0);
/**/
/* SIGxy,EPSxy Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[4]+=ival*exy[m3];
eps[30]+=ival*esp[m3];
}
/* End SIGxy*EPSxy interpolation ------------------------ */
/**/
/**/
/**/
/* EPSxx, EPSyy, P interpolation ------------------------ */
/* Buffer clear */
eps[6]=eps[10]=eps[11]=eps[12]=0;
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x>(gx[m10]+gx[m10+1])/2.0) m1min+=1;
if(m1min<1) m1min=1; if(m1min>xnumx-2) m1min=xnumx-2;
m1max=m1min+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
m1min=m1min-intermod; if(m1min<1) m1min=1;
/**/
m2min=m20; if(y>(gy[m20]+gy[m20+1])/2.0) m2min+=1;
if(m2min<1) m2min=1; if(m2min>ynumy-2) m2min=ynumy-2;
m2max=m2min+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
m2min=m2min-intermod; if(m2min<1) m2min=1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,-1,-1);
/**/
/* SIGxx,EPSxx,SIGyy,EPSyy,P Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[6]+=ival*exx[m3];
eps[10]+=ival*pr[m3];
eps[11]+=ival*(vx[m3-1]+vx[m3-ynumy-1])*0.5*(1.0-xrat);
eps[12]+=ival*(vy[m3-ynumy]+vy[m3-ynumy-1])*0.5*(1.0-xrat);
}
/* End SIGxx*EPSxx,SIGyy*EPSyy interpolation ------------------------ */
/*
depthp(x,y);eps[10]=eps[50];
*/
/**/
/**/
/**/
/* Vx interpolation ------------------------ */
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10-intermod; if(m1min<0) m1min=0;
m1max=m10+1+intermod; if(m1max>xnumx-1) m1max=xnumx-1;
/**/
m2min=m20; if(y<(gy[m20]+gy[m20+1])/2.0) m2min-=1;
if(m2min<0) m2min=0; if(m2min>ynumy-3) m2min=ynumy-3;
m2max=m2min+1+intermod; if(m2max>ynumy-2) m2max=ynumy-2;
m2min=m2min-intermod; if(m2min<0) m2min=0;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,0,+1);
/**/
/* Vx Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[11]+=ival*vx[m3]*xrat;
}
/* End Vx interpolation ------------------------ */
/**/
/**/
/**/
/* Vy interpolation ------------------------ */
/* Horizontal,Vertical limits for interpolation calc */
m1min=m10; if(x<(gx[m10]+gx[m10+1])/2.0) m1min-=1;
if(m1min<0) m1min=0; if(m1min>xnumx-3) m1min=xnumx-3;
m1max=m1min+1+intermod; if(m1max>xnumx-2) m1max=xnumx-2;
m1min=m1min-intermod; if(m1min<0) m1min=0;
/**/
m2min=m20-intermod; if(m2min<0) m2min=0;
m2max=m20+1+intermod; if(m2max>ynumy-1) m2max=ynumy-1;
/**/
/* Interpolation weights calc after Fornberg (1996) */
nodewt(m1min,m1max,m2min,m2max,x,y,+1,0);
/**/
/* Vy Interpolate after interpolation weights */
for (m1=m1min;m1<=m1max;m1++)
for (m2=m2min;m2<=m2max;m2++)
{
/* Current node num, wt */
m3=m1*ynumy+m2;
ival=cn[m1-m1min][1]*cn[m2-m2min][0];
eps[12]+=ival*vy[m3]*xrat;
}
/* End Vy interpolation ------------------------ */
/*
fprintf(fp_log,"eps %e %e ",m1,m2,e,n); getchar();
*/
}
/* Calculation of Vx,Vy, EPSxx,EPSyy,EPSxy, SPINxy by Interpolation */
|
rmse.c | /*************************************************************************/
/** File: rmse.c **/
/** Description: calculate root mean squared error of particular **/
/** clustering. **/
/** Author: Sang-Ha Lee **/
/** University of Virginia. **/
/** **/
/** Note: euclid_dist_2() and find_nearest_point() adopted from **/
/** Minebench code. **/
/** **/
/*************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "kmeans.h"
extern double wtime(void);
/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline
float euclid_dist_2(float *pt1,
float *pt2,
int numdims)
{
int i;
float ans=0.0;
for (i=0; i<numdims; i++)
ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]);
return(ans);
}
/*----< find_nearest_point() >-----------------------------------------------*/
__inline
int find_nearest_point(float *pt, /* [nfeatures] */
int nfeatures,
float **pts, /* [npts][nfeatures] */
int npts)
{
int index=0, i; /* default to center 0 in case npts == 0 */
float min_dist=FLT_MAX; /* running minimum, so start at the largest float */
/* find the cluster center id with min distance to pt */
for (i=0; i<npts; i++) {
float dist;
dist = euclid_dist_2(pt, pts[i], nfeatures); /* square root not needed for comparison */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
return(index);
}
/*----< rms_err(): calculates RMSE of clustering >-------------------------------------*/
float rms_err (float **feature, /* [npoints][nfeatures] */
int nfeatures,
int npoints,
float **cluster_centres, /* [nclusters][nfeatures] */
int nclusters)
{
int i;
int nearest_cluster_index; /* cluster center id with min distance to pt */
float sum_euclid = 0.0; /* sum of Euclidean distance squares */
float ret; /* return value */
/* calculate and sum the square of the Euclidean distances */
/* reduction(+:sum_euclid) is required: without it the += below races */
#pragma omp parallel for \
shared(feature,cluster_centres) \
firstprivate(npoints,nfeatures,nclusters) \
private(i, nearest_cluster_index) \
reduction(+:sum_euclid) \
schedule (static)
for (i=0; i<npoints; i++) {
nearest_cluster_index = find_nearest_point(feature[i],
nfeatures,
cluster_centres,
nclusters);
sum_euclid += euclid_dist_2(feature[i],
cluster_centres[nearest_cluster_index],
nfeatures);
}
/* divide by n, then take sqrt */
ret = sqrt(sum_euclid / npoints);
return(ret);
}
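/* Standalone sketch (not part of the original Minebench code): the same
   reduction pattern used in rms_err() above, in isolation. Each thread
   accumulates a private partial sum that OpenMP combines with + when the
   loop ends, avoiding a data race on the shared accumulator. */
static float sum_of_squares_sketch(const float *v, int n)
{
    float sum = 0.0f;
    #pragma omp parallel for reduction(+:sum) schedule(static)
    for (int p = 0; p < n; p++)
        sum += v[p] * v[p]; // per-thread partial sums, merged at loop end
    return sum;
}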
|
primos.c | #include<stdlib.h>
#include<stdio.h>
#include<omp.h>
int main (int argc, const char** argv) {
if (argc < 2) {
printf("Usage ./%s <number_of_threads>\n", argv[0]);
exit(0);
}
printf("Number of processors: %d\n", omp_get_num_procs());
const int NUM_TH = atoi(argv[1]);
const int intervalo = 5000;
double starttime, stoptime;
int i,j,k;
int prime;
int total;
starttime = omp_get_wtime();
omp_set_num_threads(NUM_TH);
// total must be private: every iteration of k recomputes it, and a shared
// copy would race between threads
#pragma omp parallel private ( i, j, k, prime, total )
#pragma omp for schedule (dynamic)
for (k = 1 ; k <= intervalo ; k++) {
total = 0;
for ( i = 2; i <= k ; i++ ) {
prime = 1;
for ( j = 2; j < i; j++ ) {
if ( i % j == 0 ) {
prime = 0;
break;
}
}
total = total + prime;
}
printf("O número de primos do intervalo [1-%d] é %d\n", k, total);
}
stoptime = omp_get_wtime();
printf("Tempo de execução: %3.2f segundos\n", stoptime-starttime);
return(0);
}
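/* Illustrative variant (not called by main above): counting the primes up
   to a single bound n with a reduction clause instead of a per-thread
   private counter, so the partial counts are combined safely. */
static int count_primes_sketch(int n)
{
    int count = 0;
    #pragma omp parallel for reduction(+:count) schedule(dynamic)
    for (int i = 2; i <= n; i++) {
        int prime = 1;
        for (int j = 2; j < i; j++) { // trial division, as in main above
            if (i % j == 0) { prime = 0; break; }
        }
        count += prime;
    }
    return count;
}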
|
main.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 1024
int a[N][N], b[N][N];
int seq[N][N];
int reduce[N][N];
int transpose_reduce_shared[N][N];
double cal_time(struct timespec *t_end, struct timespec *t_start)
{
double elapsedTime;
elapsedTime = (t_end->tv_sec - t_start->tv_sec) * 1000.0;
elapsedTime += (t_end->tv_nsec - t_start->tv_nsec) / 1000000.0;
return elapsedTime;
}
int test()
{
struct timespec t_start, t_end;
int i, j, k;
// Generate data
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
{
a[i][j] = rand() % N;
b[i][j] = rand() % N;
}
// Sequential
clock_gettime(CLOCK_REALTIME, &t_start);
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
{
seq[i][j] = 0;
for (k = 0; k < N; k++)
seq[i][j] += a[i][k] * b[k][j];
}
clock_gettime(CLOCK_REALTIME, &t_end);
double final_seq = cal_time(&t_end, &t_start);
printf("Sequential time: %lf ms\n", final_seq);
// Reduce access times + Parallel
clock_gettime(CLOCK_REALTIME, &t_start);
#pragma omp parallel for collapse(2) shared(reduce, a, b) schedule(dynamic, 16)
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
int sum = 0;
for (int k = 0; k < N; k++)
{
sum += a[i][k] * b[k][j];
}
reduce[i][j] = sum;
}
}
clock_gettime(CLOCK_REALTIME, &t_end);
double final_reduce = cal_time(&t_end, &t_start);
printf("Parallel reduce time: %lf ms\n", final_reduce);
// Transpose + Reduce + parallel
clock_gettime(CLOCK_REALTIME, &t_start);
for (int i = 0; i < N; i++)
{
for (int j = i + 1; j < N; j++)
{
int temp = b[i][j];
b[i][j] = b[j][i];
b[j][i] = temp;
}
}
#pragma omp parallel for shared(transpose_reduce_shared, a, b) collapse(2) schedule(dynamic, 16)
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
int sum = 0;
transpose_reduce_shared[i][j] = 0;
for (int k = 0; k < N; k++)
{
sum += a[i][k] * b[j][k];
}
transpose_reduce_shared[i][j] = sum;
}
}
clock_gettime(CLOCK_REALTIME, &t_end);
double best = cal_time(&t_end, &t_start);
printf("Parallel transpose_reduce_shared time: %lf ms\n", best);
// Evaluation: the inner break only leaves the inner loop, so track
// mismatches with a flag instead of testing the loop counters afterwards
int ok = 1;
for (i = 0; i < N && ok; i++)
{
for (j = 0; j < N; j++)
{
if (seq[i][j] != reduce[i][j] || seq[i][j] != transpose_reduce_shared[i][j])
{
ok = 0;
break;
}
}
}
if (ok)
{
printf("Test pass!!!\n");
}
else
{
printf("Test failure..\n");
return 0;
}
return 1;
}
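// Illustrative helper (not called above): the transposed inner product in
// isolation. After the in-place transpose of b, the kernel reads a[i][k]
// and b[j][k], i.e. two unit-stride streams, instead of the column walk
// b[k][j], which misses cache on every iteration for large N.
static int dot_row_sketch(const int *arow, const int *btrow, int n)
{
    int sum = 0;
    for (int k = 0; k < n; k++)
        sum += arow[k] * btrow[k]; // both operands read with unit stride
    return sum;
}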
int main()
{
test();
return 0;
}
|
mandel-omp.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
struct timeval time;
gettimeofday(&time, NULL);
return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
/* Calculate points and save/display */
#pragma omp parallel private(row,col)
{
#pragma omp for schedule(runtime)
for (row = 0; row < height; ++row) {
//#pragma omp task
for (col = 0; col < width; ++col) {
complex z, c;
z.real = z.imag = 0;
/* Scale display coordinates to actual region */
c.real = real_min + ((double) col * scale_real);
c.imag = imag_min + ((double) (height-1-row) * scale_imag);
/* height-1-row so y axis displays
* with larger values at top
*/
/* Calculate z0, z1, .... until divergence or maximum iterations */
int k = 0;
double lengthsq, temp;
do {
temp = z.real*z.real - z.imag*z.imag + c.real;
z.imag = 2*z.real*z.imag + c.imag;
z.real = temp;
lengthsq = z.real*z.real + z.imag*z.imag;
++k;
} while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
/* Scale color and display point */
long color = (long) ((k-1) * scale_color) + min_color;
if (setup_return == EXIT_SUCCESS) {
#pragma omp critical
{
XSetForeground (display, gc, color);
XDrawPoint (display, win, gc, col, row);
}
}
#else
output[row][col]=k;
#endif
}
}
}
}
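/* Usage note (a sketch of the intent, not taken from the original text):
 * schedule(runtime) above defers the loop schedule to the OMP_SCHEDULE
 * environment variable, e.g.
 *     OMP_SCHEDULE="dynamic,8" ./mandel-omp -i 10000
 * A dynamic schedule helps here because rows intersecting the Mandelbrot
 * set iterate up to maxiter times while rows far from it diverge quickly,
 * so a static partition would leave threads unevenly loaded. */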
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
GB_unaryop__ainv_bool_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_fp64
// op(A') function: GB_tran__ainv_bool_fp64
// C type: bool
// A type: double
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_bool_fp64
(
bool *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_bool_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
detector.c | #include "darknet.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void extract_filename_path(char* filename); // forward declaration: defined below, called from test_detector()
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%200 == 0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
static int get_coco_image_id(char *filename)
{
char *p = strrchr(filename, '/');
char *c = strrchr(filename, '_');
if(c) p = c;
return atoi(p+1);
}
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
int i, j;
int image_id = get_coco_image_id(image_path);
for(i = 0; i < num_boxes; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
float bx = xmin;
float by = ymin;
float bw = xmax - xmin;
float bh = ymax - ymin;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
}
}
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;
if (xmin < 1) xmin = 1;
if (ymin < 1) ymin = 1;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
xmin, ymin, xmax, ymax);
}
}
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
int class = j;
if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
xmin, ymin, xmax, ymax);
}
}
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths("data/coco_val_5k.list");
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) do_nms_obj(dets, nboxes, 1, nms);
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
image sized = letterbox_image(im, net->w, net->h);
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
float *X = sized.data;
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
free_detections(dets, nboxes);
if (filename) extract_filename_path(filename); // filename is NULL in interactive mode
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
make_window("predictions", 512, 512, 0);
show_image(im, "predictions", 0);
#endif
}
free_image(im);
free_image(sized);
if (filename) break;
}
}
void extract_filename_path(char* filename){
// insert code here...
char *token;
char *line = filename;
char *slash = "/";
char *search = "_";
char *dot = ".";
char build[50] = "lat:";
char build2[20] = "\nlong:";
//remove uploads
token = strtok(line,slash);
token = strtok(NULL,slash);
strcat(build, token);
//printf("%s\n", build);
token = strtok(build, search);
token = strtok(NULL,search);
//printf("%s\n", token);
token = strtok(token,dot);
strcat(build2, token);
strcat(build2,".");
token = strtok(NULL,dot);
strcat(build2,token);
//printf("%s\n", build2);
strcat(build,build2);
//printf("%s\n", build);
//strcat(build,"\n");
//strcat(build,"long:");
//strcat(build, token);
//printf("%s\n", build);
//printf("%s\n", build);
/*
strcat(build, token);
//token = strtok(NULL, search);
token = strtok(token,dot);
strcat(build,token);
strcat(build,".");
token = strtok(NULL,dot);
strcat(build,token);
*/
//printf("%s\n", build);
FILE *file = fopen("latlng.txt", "w");
int results = fputs(build, file);
if (results == EOF) {
}
fclose(file);
}
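/* Usage sketch (an assumption about the intended input format, inferred
 * from the parsing above): a path such as "uploads/40.7128_-74.0060.jpg"
 * produces a latlng.txt containing
 *     lat:40.7128
 *     long:-74.0060
 * Note that build[50] and build2[20] are small fixed buffers, so very long
 * coordinate strings would overflow them. */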
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
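/* Usage sketch (assumed invocation, matching the argument parsing above,
 * where argv[2] selects the mode and argv[3..] supply the files; the cfg
 * and weight file names are illustrative):
 *     darknet detector test  cfg/coco.data cfg/yolov3.cfg yolov3.weights data/dog.jpg
 *     darknet detector train cfg/coco.data cfg/yolov3.cfg darknet53.conv.74 -gpus 0,1
 *     darknet detector valid cfg/coco.data cfg/yolov3.cfg yolov3.weights -out results
 */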
|
sum.c | // Vector update benchmark skeleton (f1[i] += f2[i]; adapted from a Jacobi 3D skeleton program)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timing.h"
int main(int argc, char** argv) {
double wct_start,wct_end,cput_start,cput_end,runtime;
int iter,size,i,j;
double *f1, *f2;
iter=1000;
double mintime = 4.0;
if (argc != 2 && argc != 3) {
printf("Usage: %s <size> [mintime]\n",argv[0]);
exit(1);
}
if (argc == 3) {
mintime = atof(argv[2]);
}
size = atoi(argv[1]);
f1 = malloc((size_t)size*sizeof(double));
f2 = malloc((size_t)size*sizeof(double));
if (f1 == NULL || f2 == NULL) {
fprintf(stderr, "allocation failed\n");
exit(1);
}
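/* initialize in parallel with the same static schedule as the timed loop:
under first-touch placement each page lands on the NUMA node of the
thread that will later use it */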
#pragma omp parallel for schedule(static)
for (i = 0; i < size; i++) {
f1[i] = sin( (double) i * i);
f2[i] = cos( (double) 2*i);
}
// time measurement
timing(&wct_start, &cput_start);
while (1) {
timing(&wct_start, &cput_start);
for (j = 0; j < iter; j++) {
#pragma omp parallel for schedule(static)
for (i = 0; i < size; i++) {
f1[i] += f2[i];
}
}
timing(&wct_end, &cput_end);
// ensure at least mintime seconds of wall-clock time were measured; otherwise rerun with twice the iteration count
if (wct_end - wct_start > mintime) {
break;
}
iter = iter * 2;
}
timing(&wct_end, &cput_end);
runtime = wct_end-wct_start;
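/* each sweep performs one addition per element, so total flops = iter * size */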
printf("size:\t%d\ttime/iter:\t%lf\tGFLOP/s:\t%lf\n", size, runtime/iter, ((double)iter) * size * 1e-9 / runtime);
return 0;
}
|
GB_unop__ainv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_bool_bool)
// op(A') function: GB (_unop_tran__ainv_bool_bool)
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_bool_bool)
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
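// A is not bitmap: all anz entries of Ax are live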
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
bool z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_bool_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr33880.c | /* PR middle-end/33880 */
/* { dg-do run } */
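/* Exercises OpenMP worksharing loops whose iteration variables and loop
bounds are also read or written through GNU C nested functions. */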
extern void abort (void);
void
test1 (void)
{
int i = 0, j = 0;
void bar (void)
{
i++;
j++;
}
bar ();
#pragma omp parallel for num_threads(4)
for (i = 0; i < 100; i++)
#pragma omp atomic
j += 1;
if (j != 101)
abort ();
#pragma omp parallel for lastprivate(i) num_threads(2)
for (i = 0; i < 100; i++)
#pragma omp atomic
j += 1;
if (i != 100)
abort ();
i = 3;
bar ();
if (j != 202)
abort ();
if (i != 4)
abort ();
}
void
test2 (void)
{
int i = -1, j = 99, k, l = 9, m = 0;
void bar (void)
{
i++;
j++;
l++;
m++;
}
bar ();
#pragma omp parallel for num_threads(4)
for (k = i; k < j; k += l)
#pragma omp atomic
m += 1;
bar ();
if (i != 1 || j != 101 || l != 11 || m != 12)
abort ();
}
void
test3 (void)
{
int i, j, k, l, m;
void bar (void)
{
#pragma omp parallel for num_threads(4)
for (i = j; i < k; i += l)
#pragma omp atomic
m += 1;
}
void baz (void)
{
#pragma omp parallel for num_threads(2) lastprivate(i)
for (i = j; i < k * 2; i += l / 2)
#pragma omp atomic
m += 1;
}
i = 7;
j = 0;
k = 100;
l = 2;
m = 0;
bar ();
if (j != 0 || k != 100 || l != 2 || m != 50)
abort ();
baz ();
if (i != 200 || j != 0 || k != 100 || l != 2 || m != 250)
abort ();
}
void
test4 (void)
{
int i, j, k, l, m = 0;
int foo (void)
{
return j;
}
int bar (void)
{
return k;
}
int baz (void)
{
return l;
}
j = 0;
k = 1000;
l = 2;
#pragma omp parallel for num_threads(8) lastprivate(i)
for (i = foo (); i < bar (); i += baz ())
#pragma omp atomic
m += 1;
if (i != 1000 || m != 500 || j != 0 || k != 1000 || l != 2)
abort ();
}
int
main (void)
{
test1 ();
test2 ();
test3 ();
test4 ();
return 0;
}
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C versions were derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. The 3.0 translation was performed by UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
3.0 structure translation: F. Conti
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "../common/npb-C.h"
#include "npbparams.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
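/* NZ is an upper bound on the number of nonzeros that makea can generate */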
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static double v[NA+1+1]; /* v[1:NA+1] */
static double aelt[NZ+1]; /* aelt[1:NZ] */
static double a[NZ+1]; /* a[1:NZ] */
static double x[NA+2+1]; /* x[1:NA+2] */
static double z[NA+2+1]; /* z[1:NA+2] */
static double p[NA+2+1]; /* p[1:NA+2] */
static double q[NA+2+1]; /* q[1:NA+2] */
static double r[NA+2+1]; /* r[1:NA+2] */
//static double w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static double amult;
static double tran;
/* function declarations */
static void conj_grad (int colidx[], int rowstr[], double x[], double z[],
double a[], double p[], double q[], double r[],
//double w[],
double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, double rcond, int arow[], int acol[],
double aelt[], double v[], int iv[], double shift );
static void sparse(double a[], int colidx[], int rowstr[], int n,
int arow[], int acol[], double aelt[],
int firstrow, int lastrow,
double x[], boolean mark[], int nzloc[], int nnza);
static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
int i, j, k, it;
int nthreads = 1;
double zeta;
double rnorm;
double norm_temp11;
double norm_temp12;
double t, mflops;
char class;
boolean verified;
double zeta_verify_value, epsilon;
firstrow = 1;
lastrow = NA;
firstcol = 1;
lastcol = NA;
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
class = 'S';
zeta_verify_value = 8.5971775078648;
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
class = 'W';
zeta_verify_value = 10.362595087124;
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
class = 'A';
zeta_verify_value = 17.130235054029;
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
class = 'B';
zeta_verify_value = 22.712745482631;
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
class = 'C';
zeta_verify_value = 28.973605592845;
} else {
class = 'U';
}
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
" - CG Benchmark\n");
printf(" Size: %10d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
tran = 314159265.0;
amult = 1220703125.0;
zeta = randlc( &tran, amult );
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma omp parallel default(shared) private(i,j,k)
{
#pragma omp for nowait
for (j = 1; j <= lastrow - firstrow + 1; j++) {
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
colidx[k] = colidx[k] - firstcol + 1;
}
}
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (i = 1; i <= NA+1; i++) {
x[i] = 1.0;
}
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = 0.0;
p[j] = 0.0;
}
}// end omp parallel
zeta = 0.0;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
for (it = 1; it <= 1; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
conj_grad (colidx, rowstr, x, z, a, p, q, r,/* w,*/ &rnorm);
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
for (j = 1; j <= lastcol-firstcol+1; j++) {
norm_temp11 = norm_temp11 + x[j]*z[j];
norm_temp12 = norm_temp12 + z[j]*z[j];
}
norm_temp12 = 1.0 / sqrt( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
for (j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(i)
for (i = 1; i <= NA+1; i++) {
x[i] = 1.0;
}
zeta = 0.0;
timer_clear( 1 );
timer_start( 1 );
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
conj_grad(colidx, rowstr, x, z, a, p, q, r/*, w*/, &rnorm);
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
for (j = 1; j <= lastcol-firstcol+1; j++) {
norm_temp11 = norm_temp11 + x[j]*z[j];
norm_temp12 = norm_temp12 + z[j]*z[j];
}
norm_temp12 = 1.0 / sqrt( norm_temp12 );
zeta = SHIFT + 1.0 / norm_temp11;
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
for (j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}
} /* end of main iter inv pow meth */
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop( 1 );
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
printf(" Benchmark completed\n");
epsilon = 1.0e-10;
if (class != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.12e\n", zeta);
printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.12e\n", zeta);
printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void conj_grad (
int colidx[], /* colidx[1:nzz] */
int rowstr[], /* rowstr[1:naa+1] */
double x[], /* x[*] */
double z[], /* z[*] */
double a[], /* a[1:nzz] */
double p[], /* p[*] */
double q[], /* q[*] */
double r[], /* r[*] */
//double w[], /* w[*] */
double *rnorm )
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c Floating point arrays here are named as in NPB1 spec discussion of
c CG algorithm
c---------------------------------------------------------------------*/
{
static int callcount = 0;
double d, sum, rho, rho0, alpha, beta;
int i, j, k;
int cgit, cgitmax = 25;
rho = 0.0;
#pragma omp parallel default(shared) private(j,sum) shared(rho,naa)
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
{
#pragma omp for
for (j = 1; j <= naa+1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = x[j];
p[j] = r[j];
//w[j] = 0.0;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:rho)
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
}/* end omp parallel */
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
rho0 = rho;
d = 0.0;
rho = 0.0;
#pragma omp parallel default(shared) private(j,k,sum,alpha,beta) shared(d,rho0,rho)
{
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe 5%)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
#pragma omp for
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
//w[j] = sum;
q[j] = sum;
}
/* unrolled-by-two version
#pragma omp for private(i,k)
for (j = 1; j <= lastrow-firstrow+1; j++) {
int iresidue;
double sum1, sum2;
i = rowstr[j];
iresidue = (rowstr[j+1]-i) % 2;
sum1 = 0.0;
sum2 = 0.0;
if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]];
for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) {
sum1 = sum1 + a[k] * p[colidx[k]];
sum2 = sum2 + a[k+1] * p[colidx[k+1]];
}
w[j] = sum1 + sum2;
}
*/
/* unrolled-by-8 version
#pragma omp for private(i,k,sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
int iresidue;
i = rowstr[j];
iresidue = (rowstr[j+1]-i) % 8;
sum = 0.0;
for (k = i; k <= i+iresidue-1; k++) {
sum = sum + a[k] * p[colidx[k]];
}
for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) {
sum = sum + a[k ] * p[colidx[k ]]
+ a[k+1] * p[colidx[k+1]]
+ a[k+2] * p[colidx[k+2]]
+ a[k+3] * p[colidx[k+3]]
+ a[k+4] * p[colidx[k+4]]
+ a[k+5] * p[colidx[k+5]]
+ a[k+6] * p[colidx[k+6]]
+ a[k+7] * p[colidx[k+7]];
}
w[j] = sum;
}
*/
/*
#pragma omp for
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
*/
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
/*
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0;
}
*/
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = d + p[j]*q[j];
}
#pragma omp barrier
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
//#pragma omp single
alpha = rho0 / d;
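/* d is final here (barrier above); each thread computes the same private alpha redundantly */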
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
#pragma omp for reduction(+:rho)
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
// }
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
/*
#pragma omp for
for (j = 1; j <= lastcol-firstcol+1; j++) {*/
rho = rho + r[j]*r[j];
}
//#pragma omp barrier
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
//#pragma omp single
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end omp parallel */
callcount++; /* moved out of the parallel region: incrementing it there was a data race */
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
sum = 0.0;
#pragma omp parallel default(shared) private(j,d) shared(sum)
{
#pragma omp for //private(d, k)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
r[j] = d;
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:sum)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
} //end omp parallel
(*rnorm) = sqrt(sum);
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
static void makea(
int n,
int nz,
double a[], /* a[1:nz] */
int colidx[], /* colidx[1:nz] */
int rowstr[], /* rowstr[1:n+1] */
int nonzer,
int firstrow,
int lastrow,
int firstcol,
int lastcol,
double rcond,
int arow[], /* arow[1:nz] */
int acol[], /* acol[1:nz] */
double aelt[], /* aelt[1:nz] */
double v[], /* v[1:n+1] */
int iv[], /* iv[1:2*n+1] */
double shift )
{
int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
/*--------------------------------------------------------------------
c nonzer is approximately (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
double size, ratio, scale;
int jcol;
size = 1.0;
ratio = pow(rcond, (1.0 / (double)n));
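/* size is multiplied by ratio once per outer iteration, decaying geometrically from 1.0 to rcond */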
nnza = 0;
/*---------------------------------------------------------------------
c Initialize colidx(n+1 .. 2n) to zero.
c Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(i)
for (i = 1; i <= n; i++) {
colidx[n+i] = 0;
}
for (iouter = 1; iouter <= n; iouter++) {
nzv = nonzer;
sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
vecset(n, v, iv, &nzv, iouter, 0.5);
for (ivelt = 1; ivelt <= nzv; ivelt++) {
jcol = iv[ivelt];
if (jcol >= firstcol && jcol <= lastcol) {
scale = size * v[ivelt];
for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
irow = iv[ivelt1];
if (irow >= firstrow && irow <= lastrow) {
nnza = nnza + 1;
if (nnza > nz) {
printf("Space for matrix elements exceeded in"
" makea\n");
printf("nnza, nzmax = %d, %d\n", nnza, nz);
printf("iouter = %d\n", iouter);
exit(1);
}
acol[nnza] = jcol;
arow[nnza] = irow;
aelt[nnza] = v[ivelt1] * scale;
}
}
}
}
size = size * ratio;
}
/*---------------------------------------------------------------------
c ... add the identity * rcond to the generated matrix to bound
c the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
for (i = firstrow; i <= lastrow; i++) {
if (i >= firstcol && i <= lastcol) {
iouter = n + i;
nnza = nnza + 1;
if (nnza > nz) {
printf("Space for matrix elements exceeded in makea\n");
printf("nnza, nzmax = %d, %d\n", nnza, nz);
printf("iouter = %d\n", iouter);
exit(1);
}
acol[nnza] = i;
arow[nnza] = i;
aelt[nnza] = rcond - shift;
}
}
/*---------------------------------------------------------------------
c ... make the sparse matrix from list of elements with duplicates
c (v and iv are used as workspace)
c---------------------------------------------------------------------*/
sparse(a, colidx, rowstr, n, arow, acol, aelt,
firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] triples
c---------------------------------------------------*/
static void sparse(
double a[], /* a[1:*] */
int colidx[], /* colidx[1:*] */
int rowstr[], /* rowstr[1:*] */
int n,
int arow[], /* arow[1:*] */
int acol[], /* acol[1:*] */
double aelt[], /* aelt[1:*] */
int firstrow,
int lastrow,
double x[], /* x[1:n] */
boolean mark[], /* mark[1:n] */
int nzloc[], /* nzloc[1:n] */
int nnza)
/*---------------------------------------------------------------------
c rows range from firstrow to lastrow
c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
int nrows;
int i, j, jajp1, nza, k, nzrow;
double xi;
/*--------------------------------------------------------------------
c how many rows of result
c-------------------------------------------------------------------*/
nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c ...count the number of triples in each row
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
for (j = 1; j <= n; j++) {
rowstr[j] = 0;
mark[j] = FALSE;
}
rowstr[n+1] = 0;
for (nza = 1; nza <= nnza; nza++) {
j = (arow[nza] - firstrow + 1) + 1;
rowstr[j] = rowstr[j] + 1;
}
rowstr[1] = 1;
for (j = 2; j <= nrows+1; j++) {
rowstr[j] = rowstr[j] + rowstr[j-1];
}
/*---------------------------------------------------------------------
c ... rowstr(j) now is the location of the first nonzero
c of row j of a
c---------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c ... preload data pages
c---------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(k,j)
for(j = 0;j <= nrows-1;j++) {
for(k = rowstr[j];k <= rowstr[j+1]-1;k++)
a[k] = 0.0;
}
/*--------------------------------------------------------------------
c ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
for (nza = 1; nza <= nnza; nza++) {
j = arow[nza] - firstrow + 1;
k = rowstr[j];
a[k] = aelt[nza];
colidx[k] = acol[nza];
rowstr[j] = rowstr[j] + 1;
}
/*--------------------------------------------------------------------
c ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
for (j = nrows; j >= 1; j--) {
rowstr[j+1] = rowstr[j];
}
rowstr[1] = 1;
/*--------------------------------------------------------------------
c ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
nza = 0;
#pragma omp parallel for default(shared) private(i)
for (i = 1; i <= n; i++) {
x[i] = 0.0;
mark[i] = FALSE;
}
jajp1 = rowstr[1];
for (j = 1; j <= nrows; j++) {
nzrow = 0;
/*--------------------------------------------------------------------
c ...loop over the jth row of a
c-------------------------------------------------------------------*/
for (k = jajp1; k < rowstr[j+1]; k++) {
i = colidx[k];
x[i] = x[i] + a[k];
if ( mark[i] == FALSE && x[i] != 0.0) {
mark[i] = TRUE;
nzrow = nzrow + 1;
nzloc[nzrow] = i;
}
}
/*--------------------------------------------------------------------
c ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
for (k = 1; k <= nzrow; k++) {
i = nzloc[k];
mark[i] = FALSE;
xi = x[i];
x[i] = 0.0;
if (xi != 0.0) {
nza = nza + 1;
a[nza] = xi;
colidx[nza] = i;
}
}
jajp1 = rowstr[j+1];
rowstr[j+1] = nza + rowstr[1];
}
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
static void sprnvc(
int n,
int nz,
double v[], /* v[1:*] */
int iv[], /* iv[1:*] */
int nzloc[], /* nzloc[1:n] */
int mark[] ) /* mark[1:n] */
{
int nn1;
int nzrow, nzv, ii, i;
double vecelt, vecloc;
nzv = 0;
nzrow = 0;
nn1 = 1;
do {
nn1 = 2 * nn1;
} while (nn1 < n);
/*--------------------------------------------------------------------
c nn1 is the smallest power of two not less than n
c-------------------------------------------------------------------*/
while (nzv < nz) {
vecelt = randlc(&tran, amult);
/*--------------------------------------------------------------------
c generate an integer between 1 and n in a portable manner
c-------------------------------------------------------------------*/
vecloc = randlc(&tran, amult);
i = icnvrt(vecloc, nn1) + 1;
if (i > n) continue;
/*--------------------------------------------------------------------
c was this integer generated already?
c-------------------------------------------------------------------*/
if (mark[i] == 0) {
mark[i] = 1;
nzrow = nzrow + 1;
nzloc[nzrow] = i;
nzv = nzv + 1;
v[nzv] = vecelt;
iv[nzv] = i;
}
}
for (ii = 1; ii <= nzrow; ii++) {
i = nzloc[ii];
mark[i] = 0;
}
}
/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
static int icnvrt(double x, int ipwr2) {
return ((int)(ipwr2 * x));
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
static void vecset(
int n,
double v[], /* v[1:*] */
int iv[], /* iv[1:*] */
int *nzv,
int i,
double val)
{
int k;
boolean set;
set = FALSE;
for (k = 1; k <= *nzv; k++) {
if (iv[k] == i) {
v[k] = val;
set = TRUE;
}
}
if (set == FALSE) {
*nzv = *nzv + 1;
v[*nzv] = val;
iv[*nzv] = i;
}
}
|
master-3.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
extern void bar(int);
void foo (void)
{
#pragma omp master
bar(0);
}
/* { dg-final { scan-tree-dump-times "omp_get_thread_num" 1 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
stationary_cython.c | /* Generated by Cython 0.29.21 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_21"
#define CYTHON_HEX_VERSION 0x001D15F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
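/* compile-time check: a division by zero here means SIZEOF_VOID_P disagrees with sizeof(void*) */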
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__GPy__kern__src__stationary_cython
#define __PYX_HAVE_API__GPy__kern__src__stationary_cython
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ndarrayobject.h"
#include "numpy/ndarraytypes.h"
#include "numpy/arrayscalars.h"
#include "numpy/ufuncobject.h"
/* NumPy API declarations from "numpy/__init__.pxd" */
#include "stationary_utils.h"
#include "pythread.h"
#include <stdlib.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
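/* casting to size_t makes a negative i wrap to a huge value, so one unsigned compare checks 0 <= i < limit */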
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
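/* __Pyx_Py_UNICODE_strlen is strlen() for Py_UNICODE buffers: it walks
   to the terminating zero and subtracts the start (the extra -1 undoes
   the post-increment that stepped past the terminator). The
   PyUnicode_FromUnicode/PyUnicode_AsUnicode APIs wrapped below are
   deprecated since CPython 3.3's PEP 393, but were still the portable
   choice for the Cython version that generated this file. */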
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
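/* This init-time check (Python 2 builds with c_string_encoding=ascii
   only) decodes all 128 ASCII code points, re-encodes them with
   sys.getdefaultencoding(), and demands a byte-identical round trip:
   the default encoding must be a superset of ASCII before string
   coercion is allowed to treat bytes as ASCII text. */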
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
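/* likely()/unlikely() lower to __builtin_expect on GCC newer than
   2.95, letting the compiler keep the predicted branch on the
   fall-through path; elsewhere they are identity macros that only
   document intent, as in `if (unlikely(!ptr)) goto bad;`. */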
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"GPy/kern/src/stationary_cython.pyx",
"__init__.pxd",
"stringsource",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
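/* A __Pyx_memviewslice is a plain C struct usable without the GIL:
   a data pointer plus per-dimension shape/strides/suboffsets, with
   ndim capped at 8 at code-generation time. As in the buffer protocol
   (PEP 3118), a suboffset of -1 marks a directly addressed dimension,
   while a non-negative suboffset means the strided address holds a
   pointer that must be dereferenced and then offset. */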
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
            (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
            !defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
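/* Slice acquisition counts use the cheapest safe primitive available:
   GNU __sync_fetch_and_add/__sync_fetch_and_sub when GCC >= 4.1.2
   atomics are usable (and the target is not i386), otherwise the
   per-memoryview PyThread lock via the *_locked fallbacks. The MSVC
   Interlocked* and Intel _Interlocked* branches are deliberately
   compiled out by the trailing `&& 0` in their #elif conditions. */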
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":690
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":691
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":692
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":693
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":697
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":698
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":699
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":700
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":704
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":705
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":714
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":715
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":716
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":718
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":719
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":720
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":722
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":723
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":725
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":726
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":727
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "GPy/kern/src/stationary_cython.pyx":11
* np.import_array()
*
* ctypedef np.float64_t DTYPE_t # <<<<<<<<<<<<<<
*
* cdef extern from "stationary_utils.h":
*/
typedef __pyx_t_5numpy_float64_t __pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":729
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":730
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":731
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":733
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
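/* With CYTHON_REFNANNY off (the default), the macros above reduce to
   plain Py_INCREF/Py_DECREF and the GOTREF/GIVEREF bookkeeping
   vanishes. Note that __Pyx_XDECREF_SET/__Pyx_DECREF_SET store the new
   value before decref'ing the old one, so any destructor the decref
   triggers can never observe `r` pointing at a freed object. */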
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* BufferGetAndValidate.proto */
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
((obj == Py_None || obj == NULL) ?\
(__Pyx_ZeroBuffer(buf), 0) :\
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8  /* matches the 8-element shape/strides/suboffsets arrays in __Pyx_memviewslice */
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
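/* __Pxy_PyFrame_Initialize_Offsets derives the offset of a frame's
   f_localsplus array from the runtime PyFrame_Type.tp_basicsize rather
   than a hard-coded offsetof; the BUILD_ASSERT checks at compile time
   that f_localsplus is the trailing member, so subtracting its size
   from tp_basicsize yields the same offset. __Pyx_PyFrame_GetLocalsplus
   then gives the fast-call path direct access to a frame's argument
   slots. */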
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
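/* UNARY_NEG_WOULD_OVERFLOW(x) is true exactly for the most negative
   value of x's type: in two's complement, x and -x share an unsigned
   bit pattern only for 0 and the minimum, and the `(x) < 0` term
   excludes 0. */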
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
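/* These helpers pin the byteorder argument that PyUnicode_DecodeUTF16
   takes by pointer: 0 honours a BOM and otherwise assumes native
   order, -1 forces little-endian, and 1 forces big-endian, following
   the CPython C-API convention. */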
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
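/* __PYX_PY_DICT_LOOKUP_IF_MODIFIED caches a dict lookup in function-
   local statics keyed on the dict's ma_version_tag (PEP 509): the
   cached value is reused verbatim until the dict mutates, and a single
   real LOOKUP then refreshes both the value and the recorded version. */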
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
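/* __Pyx_ListComp_Append writes directly into a list's spare capacity
   (PyList_SET_ITEM plus a size bump) whenever allocated > len; the
   generic PyList_Append is only reached when the list actually has to
   grow. */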
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
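/* __Pyx_PyList_Append uses the same direct-slot fast path as
   __Pyx_ListComp_Append above, but adds a `len > allocated/2` guard:
   lists well below capacity still go through PyList_Append, presumably
   so CPython's own append keeps control of resize bookkeeping for
   sparsely filled lists. */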
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *, int writable_flag);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_generic = 0;
static PyTypeObject *__pyx_ptype_5numpy_number = 0;
static PyTypeObject *__pyx_ptype_5numpy_integer = 0;
static PyTypeObject *__pyx_ptype_5numpy_signedinteger = 0;
static PyTypeObject *__pyx_ptype_5numpy_unsignedinteger = 0;
static PyTypeObject *__pyx_ptype_5numpy_inexact = 0;
static PyTypeObject *__pyx_ptype_5numpy_floating = 0;
static PyTypeObject *__pyx_ptype_5numpy_complexfloating = 0;
static PyTypeObject *__pyx_ptype_5numpy_flexible = 0;
static PyTypeObject *__pyx_ptype_5numpy_character = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void); /*proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'GPy.kern.src.stationary_cython' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "GPy.kern.src.stationary_cython"
extern int __pyx_module_is_main_GPy__kern__src__stationary_cython;
int __pyx_module_is_main_GPy__kern__src__stationary_cython = 0;
/* Implementation of 'GPy.kern.src.stationary_cython' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ImportError;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
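/* [annotation, not emitted by Cython] Cached references to Python builtins;
   they are resolved once during module initialisation so the generated code
   can raise e.g. ValueError or MemoryError without repeated name lookups. */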
static const char __pyx_k_D[] = "D";
static const char __pyx_k_M[] = "M";
static const char __pyx_k_N[] = "N";
static const char __pyx_k_O[] = "O";
static const char __pyx_k_Q[] = "Q";
static const char __pyx_k_X[] = "_X";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_d[] = "d";
static const char __pyx_k_m[] = "m";
static const char __pyx_k_n[] = "n";
static const char __pyx_k_q[] = "q";
static const char __pyx_k_X2[] = "_X2";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_nd[] = "nd";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_X_2[] = "X";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_tmp[] = "_tmp";
static const char __pyx_k_X2_2[] = "X2";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_dist[] = "dist";
static const char __pyx_k_grad[] = "_grad";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_gradq[] = "gradq";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_tmp_2[] = "tmp";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_grad_2[] = "grad";
static const char __pyx_k_grad_X[] = "grad_X";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_grad_X_cython[] = "grad_X_cython";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_lengthscale_grads[] = "lengthscale_grads";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_lengthscale_grads_in_c[] = "lengthscale_grads_in_c";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_GPy_kern_src_stationary_cython[] = "GPy.kern.src.stationary_cython";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_GPy_kern_src_stationary_cython_p[] = "GPy/kern/src/stationary_cython.pyx";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
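/* [annotation, not emitted by Cython] By Cython convention the __pyx_k_*
   char arrays above hold the raw bytes of every string literal in the
   module (identifiers truncated to a fixed length); the __pyx_n_s_*,
   __pyx_kp_s_*, __pyx_n_b_* and __pyx_n_u_* PyObject pointers declared
   below are the interned identifier strings, plain string constants, bytes
   and unicode objects built from those arrays at import time. */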
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_D;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_GPy_kern_src_stationary_cython;
static PyObject *__pyx_kp_s_GPy_kern_src_stationary_cython_p;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_M;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_s_N;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_Q;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_X;
static PyObject *__pyx_n_s_X2;
static PyObject *__pyx_n_s_X2_2;
static PyObject *__pyx_n_s_X_2;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_d;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dist;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_grad;
static PyObject *__pyx_n_s_grad_2;
static PyObject *__pyx_n_s_grad_X;
static PyObject *__pyx_n_s_grad_X_cython;
static PyObject *__pyx_n_s_gradq;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_lengthscale_grads;
static PyObject *__pyx_n_s_lengthscale_grads_in_c;
static PyObject *__pyx_n_s_m;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_nd;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_q;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_tmp;
static PyObject *__pyx_n_s_tmp_2;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_grad_X(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_N, int __pyx_v_D, int __pyx_v_M, PyArrayObject *__pyx_v__X, PyArrayObject *__pyx_v__X2, PyArrayObject *__pyx_v__tmp, PyArrayObject *__pyx_v__grad); /* proto */
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_2grad_X_cython(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED int __pyx_v_N, int __pyx_v_D, int __pyx_v_M, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_X2, __Pyx_memviewslice __pyx_v_tmp, __Pyx_memviewslice __pyx_v_grad); /* proto */
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_4lengthscale_grads_in_c(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_N, int __pyx_v_M, int __pyx_v_Q, PyArrayObject *__pyx_v__tmp, PyArrayObject *__pyx_v__X, PyArrayObject *__pyx_v__X2, PyArrayObject *__pyx_v__grad); /* proto */
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_6lengthscale_grads(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_N, int __pyx_v_M, int __pyx_v_Q, __Pyx_memviewslice __pyx_v_tmp, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_X2, __Pyx_memviewslice __pyx_v_grad); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
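/* [annotation, not emitted by Cython] __pyx_int_184977713 is the cached
   Python int 0xB068931, the expected __pyx_checksum value compared inside
   __pyx_unpickle_Enum; it matches the
   "Incompatible checksums (%s vs 0xb068931 = (name))" message above. */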
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__17;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_tuple__32;
static PyObject *__pyx_tuple__33;
static PyObject *__pyx_tuple__34;
static PyObject *__pyx_codeobj__22;
static PyObject *__pyx_codeobj__24;
static PyObject *__pyx_codeobj__26;
static PyObject *__pyx_codeobj__28;
static PyObject *__pyx_codeobj__35;
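/* [annotation, not emitted by Cython] The __pyx_tuple__N, __pyx_slice__17
   and __pyx_codeobj__N slots above are populated by the generated
   constant-initialisation code; the code objects back the module's def-ed
   functions so tracebacks can point at the original .pyx source lines. */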
/* Late includes */
/* "GPy/kern/src/stationary_cython.pyx":19
* void _lengthscale_grads "_lengthscale_grads" (int N, int M, int Q, double* tmp, double* X, double* X2, double* grad) nogil
*
* def grad_X(int N, int D, int M, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _X,
* np.ndarray[DTYPE_t, ndim=2] _X2,
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_1grad_X(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4kern_3src_17stationary_cython_1grad_X = {"grad_X", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4kern_3src_17stationary_cython_1grad_X, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_1grad_X(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
int __pyx_v_N;
int __pyx_v_D;
int __pyx_v_M;
PyArrayObject *__pyx_v__X = 0;
PyArrayObject *__pyx_v__X2 = 0;
PyArrayObject *__pyx_v__tmp = 0;
PyArrayObject *__pyx_v__grad = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("grad_X (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_N,&__pyx_n_s_D,&__pyx_n_s_M,&__pyx_n_s_X,&__pyx_n_s_X2,&__pyx_n_s_tmp,&__pyx_n_s_grad,0};
PyObject* values[7] = {0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_N)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_D)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, 1); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, 2); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, 3); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, 4); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tmp)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, 5); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_grad)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, 6); __PYX_ERR(0, 19, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "grad_X") < 0)) __PYX_ERR(0, 19, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
}
__pyx_v_N = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19, __pyx_L3_error)
__pyx_v_D = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_D == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19, __pyx_L3_error)
__pyx_v_M = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19, __pyx_L3_error)
__pyx_v__X = ((PyArrayObject *)values[3]);
__pyx_v__X2 = ((PyArrayObject *)values[4]);
__pyx_v__tmp = ((PyArrayObject *)values[5]);
__pyx_v__grad = ((PyArrayObject *)values[6]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("grad_X", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 19, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.kern.src.stationary_cython.grad_X", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__X), __pyx_ptype_5numpy_ndarray, 1, "_X", 0))) __PYX_ERR(0, 20, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__X2), __pyx_ptype_5numpy_ndarray, 1, "_X2", 0))) __PYX_ERR(0, 21, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__tmp), __pyx_ptype_5numpy_ndarray, 1, "_tmp", 0))) __PYX_ERR(0, 22, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__grad), __pyx_ptype_5numpy_ndarray, 1, "_grad", 0))) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_r = __pyx_pf_3GPy_4kern_3src_17stationary_cython_grad_X(__pyx_self, __pyx_v_N, __pyx_v_D, __pyx_v_M, __pyx_v__X, __pyx_v__X2, __pyx_v__tmp, __pyx_v__grad);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
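/* [annotation, not emitted by Cython] From Python the wrapper above is
   reached as stationary_cython.grad_X(N, D, M, X, X2, tmp, grad) with four
   2-D float64 numpy arrays; the shapes are assumed here to be X: (N, D),
   X2: (M, D), tmp: (N, M) and grad: (N, D), matching the element-wise loop
   in grad_X_cython below. grad is updated in place and None is returned. */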
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_grad_X(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_N, int __pyx_v_D, int __pyx_v_M, PyArrayObject *__pyx_v__X, PyArrayObject *__pyx_v__X2, PyArrayObject *__pyx_v__tmp, PyArrayObject *__pyx_v__grad) {
double *__pyx_v_X;
double *__pyx_v_X2;
double *__pyx_v_tmp;
double *__pyx_v_grad;
__Pyx_LocalBuf_ND __pyx_pybuffernd__X;
__Pyx_Buffer __pyx_pybuffer__X;
__Pyx_LocalBuf_ND __pyx_pybuffernd__X2;
__Pyx_Buffer __pyx_pybuffer__X2;
__Pyx_LocalBuf_ND __pyx_pybuffernd__grad;
__Pyx_Buffer __pyx_pybuffer__grad;
__Pyx_LocalBuf_ND __pyx_pybuffernd__tmp;
__Pyx_Buffer __pyx_pybuffer__tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("grad_X", 0);
__pyx_pybuffer__X.pybuffer.buf = NULL;
__pyx_pybuffer__X.refcount = 0;
__pyx_pybuffernd__X.data = NULL;
__pyx_pybuffernd__X.rcbuffer = &__pyx_pybuffer__X;
__pyx_pybuffer__X2.pybuffer.buf = NULL;
__pyx_pybuffer__X2.refcount = 0;
__pyx_pybuffernd__X2.data = NULL;
__pyx_pybuffernd__X2.rcbuffer = &__pyx_pybuffer__X2;
__pyx_pybuffer__tmp.pybuffer.buf = NULL;
__pyx_pybuffer__tmp.refcount = 0;
__pyx_pybuffernd__tmp.data = NULL;
__pyx_pybuffernd__tmp.rcbuffer = &__pyx_pybuffer__tmp;
__pyx_pybuffer__grad.pybuffer.buf = NULL;
__pyx_pybuffer__grad.refcount = 0;
__pyx_pybuffernd__grad.data = NULL;
__pyx_pybuffernd__grad.rcbuffer = &__pyx_pybuffer__grad;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__X.rcbuffer->pybuffer, (PyObject*)__pyx_v__X, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd__X.diminfo[0].strides = __pyx_pybuffernd__X.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__X.diminfo[0].shape = __pyx_pybuffernd__X.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__X.diminfo[1].strides = __pyx_pybuffernd__X.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__X.diminfo[1].shape = __pyx_pybuffernd__X.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__X2.rcbuffer->pybuffer, (PyObject*)__pyx_v__X2, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd__X2.diminfo[0].strides = __pyx_pybuffernd__X2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__X2.diminfo[0].shape = __pyx_pybuffernd__X2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__X2.diminfo[1].strides = __pyx_pybuffernd__X2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__X2.diminfo[1].shape = __pyx_pybuffernd__X2.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__tmp.rcbuffer->pybuffer, (PyObject*)__pyx_v__tmp, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd__tmp.diminfo[0].strides = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__tmp.diminfo[0].shape = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__tmp.diminfo[1].strides = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__tmp.diminfo[1].shape = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__grad.rcbuffer->pybuffer, (PyObject*)__pyx_v__grad, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd__grad.diminfo[0].strides = __pyx_pybuffernd__grad.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__grad.diminfo[0].shape = __pyx_pybuffernd__grad.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__grad.diminfo[1].strides = __pyx_pybuffernd__grad.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__grad.diminfo[1].shape = __pyx_pybuffernd__grad.rcbuffer->pybuffer.shape[1];
/* "GPy/kern/src/stationary_cython.pyx":24
* np.ndarray[DTYPE_t, ndim=2] _tmp,
* np.ndarray[DTYPE_t, ndim=2] _grad):
* cdef double *X = <double*> _X.data # <<<<<<<<<<<<<<
* cdef double *X2 = <double*> _X2.data
* cdef double *tmp = <double*> _tmp.data
*/
__pyx_v_X = ((double *)__pyx_v__X->data);
/* "GPy/kern/src/stationary_cython.pyx":25
* np.ndarray[DTYPE_t, ndim=2] _grad):
* cdef double *X = <double*> _X.data
* cdef double *X2 = <double*> _X2.data # <<<<<<<<<<<<<<
* cdef double *tmp = <double*> _tmp.data
* cdef double *grad = <double*> _grad.data
*/
__pyx_v_X2 = ((double *)__pyx_v__X2->data);
/* "GPy/kern/src/stationary_cython.pyx":26
* cdef double *X = <double*> _X.data
* cdef double *X2 = <double*> _X2.data
* cdef double *tmp = <double*> _tmp.data # <<<<<<<<<<<<<<
* cdef double *grad = <double*> _grad.data
* with nogil:
*/
__pyx_v_tmp = ((double *)__pyx_v__tmp->data);
/* "GPy/kern/src/stationary_cython.pyx":27
* cdef double *X2 = <double*> _X2.data
* cdef double *tmp = <double*> _tmp.data
* cdef double *grad = <double*> _grad.data # <<<<<<<<<<<<<<
* with nogil:
* _grad_X(N, D, M, X, X2, tmp, grad) # return nothing, work in place.
*/
__pyx_v_grad = ((double *)__pyx_v__grad->data);
/* "GPy/kern/src/stationary_cython.pyx":28
* cdef double *tmp = <double*> _tmp.data
* cdef double *grad = <double*> _grad.data
* with nogil: # <<<<<<<<<<<<<<
* _grad_X(N, D, M, X, X2, tmp, grad) # return nothing, work in place.
*
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
/* "GPy/kern/src/stationary_cython.pyx":29
* cdef double *grad = <double*> _grad.data
* with nogil:
* _grad_X(N, D, M, X, X2, tmp, grad) # return nothing, work in place. # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
_grad_X(__pyx_v_N, __pyx_v_D, __pyx_v_M, __pyx_v_X, __pyx_v_X2, __pyx_v_tmp, __pyx_v_grad);
}
/* "GPy/kern/src/stationary_cython.pyx":28
* cdef double *tmp = <double*> _tmp.data
* cdef double *grad = <double*> _grad.data
* with nogil: # <<<<<<<<<<<<<<
* _grad_X(N, D, M, X, X2, tmp, grad) # return nothing, work in place.
*
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/kern/src/stationary_cython.pyx":19
* void _lengthscale_grads "_lengthscale_grads" (int N, int M, int Q, double* tmp, double* X, double* X2, double* grad) nogil
*
* def grad_X(int N, int D, int M, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _X,
* np.ndarray[DTYPE_t, ndim=2] _X2,
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X2.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__grad.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__tmp.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("GPy.kern.src.stationary_cython.grad_X", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X2.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__grad.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__tmp.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
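/* [annotation, not emitted by Cython] grad_X above only acquires the four
   buffer views, extracts the raw double* data pointers, releases the GIL,
   and delegates to the external C routine _grad_X declared in the .pyx
   cdef extern block; presumably _grad_X performs the same in-place update
   that the pure-Cython grad_X_cython below spells out. */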
/* "GPy/kern/src/stationary_cython.pyx":32
*
* @cython.cdivision(True)
* def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad): # <<<<<<<<<<<<<<
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True):
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_3grad_X_cython(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4kern_3src_17stationary_cython_3grad_X_cython = {"grad_X_cython", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4kern_3src_17stationary_cython_3grad_X_cython, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_3grad_X_cython(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
CYTHON_UNUSED int __pyx_v_N;
int __pyx_v_D;
int __pyx_v_M;
__Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_X2 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_tmp = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_grad = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("grad_X_cython (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_N,&__pyx_n_s_D,&__pyx_n_s_M,&__pyx_n_s_X_2,&__pyx_n_s_X2_2,&__pyx_n_s_tmp_2,&__pyx_n_s_grad_2,0};
PyObject* values[7] = {0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_N)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_D)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, 1); __PYX_ERR(0, 32, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, 2); __PYX_ERR(0, 32, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, 3); __PYX_ERR(0, 32, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X2_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, 4); __PYX_ERR(0, 32, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tmp_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, 5); __PYX_ERR(0, 32, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_grad_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, 6); __PYX_ERR(0, 32, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "grad_X_cython") < 0)) __PYX_ERR(0, 32, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
}
__pyx_v_N = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_v_D = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_D == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_v_M = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_v_X2 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_X2.memview)) __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_v_tmp = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_tmp.memview)) __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_v_grad = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[6], PyBUF_WRITABLE); if (unlikely(!__pyx_v_grad.memview)) __PYX_ERR(0, 32, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("grad_X_cython", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.kern.src.stationary_cython.grad_X_cython", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4kern_3src_17stationary_cython_2grad_X_cython(__pyx_self, __pyx_v_N, __pyx_v_D, __pyx_v_M, __pyx_v_X, __pyx_v_X2, __pyx_v_tmp, __pyx_v_grad);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
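/* [annotation, not emitted by Cython] Note that all four memoryview
   arguments are converted with PyBUF_WRITABLE, so passing a read-only
   array for X, X2 or tmp raises a BufferError-style failure even though
   only grad is actually written to. */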
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_2grad_X_cython(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED int __pyx_v_N, int __pyx_v_D, int __pyx_v_M, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_X2, __Pyx_memviewslice __pyx_v_tmp, __Pyx_memviewslice __pyx_v_grad) {
int __pyx_v_n;
int __pyx_v_d;
int __pyx_v_nd;
int __pyx_v_m;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
Py_ssize_t __pyx_t_14;
__Pyx_RefNannySetupContext("grad_X_cython", 0);
/* "GPy/kern/src/stationary_cython.pyx":34
* def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad):
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True): # <<<<<<<<<<<<<<
* n = nd / D
* d = nd % D
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = (__pyx_v_N * __pyx_v_D);
      if ((1 == 0)) abort(); /* generated guard against a zero prange() step; the step here is the constant 1, so this folds away */
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
        __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; /* loop trip count for range(0, N*D, 1): (stop - start + step - sign(step)) / step, i.e. N*D here */
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_d) lastprivate(__pyx_v_m) lastprivate(__pyx_v_n) firstprivate(__pyx_v_nd) lastprivate(__pyx_v_nd)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
{
                        __pyx_v_nd = (int)(0 + 1 * __pyx_t_2); /* nd = start + step * iteration index */
/* Initialize private variables to invalid values */
__pyx_v_d = ((int)0xbad0bad0);
__pyx_v_m = ((int)0xbad0bad0);
__pyx_v_n = ((int)0xbad0bad0);
/* "GPy/kern/src/stationary_cython.pyx":35
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True):
* n = nd / D # <<<<<<<<<<<<<<
* d = nd % D
* grad[n,d] = 0.0
*/
__pyx_v_n = (__pyx_v_nd / __pyx_v_D);
/* "GPy/kern/src/stationary_cython.pyx":36
* for nd in prange(N * D, nogil=True):
* n = nd / D
* d = nd % D # <<<<<<<<<<<<<<
* grad[n,d] = 0.0
* for m in range(M):
*/
__pyx_v_d = (__pyx_v_nd % __pyx_v_D);
/* "GPy/kern/src/stationary_cython.pyx":37
* n = nd / D
* d = nd % D
* grad[n,d] = 0.0 # <<<<<<<<<<<<<<
* for m in range(M):
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d])
*/
__pyx_t_4 = __pyx_v_n;
__pyx_t_5 = __pyx_v_d;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_grad.data + __pyx_t_4 * __pyx_v_grad.strides[0]) ) + __pyx_t_5 * __pyx_v_grad.strides[1]) )) = 0.0;
/* "GPy/kern/src/stationary_cython.pyx":38
* d = nd % D
* grad[n,d] = 0.0
* for m in range(M): # <<<<<<<<<<<<<<
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d])
*
*/
__pyx_t_6 = __pyx_v_M;
__pyx_t_7 = __pyx_t_6;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_m = __pyx_t_8;
/* "GPy/kern/src/stationary_cython.pyx":39
* grad[n,d] = 0.0
* for m in range(M):
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d]) # <<<<<<<<<<<<<<
*
* def lengthscale_grads_in_c(int N, int M, int Q,
*/
__pyx_t_5 = __pyx_v_n;
__pyx_t_4 = __pyx_v_m;
__pyx_t_9 = __pyx_v_n;
__pyx_t_10 = __pyx_v_d;
__pyx_t_11 = __pyx_v_m;
__pyx_t_12 = __pyx_v_d;
__pyx_t_13 = __pyx_v_n;
__pyx_t_14 = __pyx_v_d;
*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_grad.data + __pyx_t_13 * __pyx_v_grad.strides[0]) ) + __pyx_t_14 * __pyx_v_grad.strides[1]) )) += ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tmp.data + __pyx_t_5 * __pyx_v_tmp.strides[0]) ) + __pyx_t_4 * __pyx_v_tmp.strides[1]) ))) * ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X.data + __pyx_t_9 * __pyx_v_X.strides[0]) ) + __pyx_t_10 * __pyx_v_X.strides[1]) ))) - (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X2.data + __pyx_t_11 * __pyx_v_X2.strides[0]) ) + __pyx_t_12 * __pyx_v_X2.strides[1]) )))));
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "GPy/kern/src/stationary_cython.pyx":34
* def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad):
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True): # <<<<<<<<<<<<<<
* n = nd / D
* d = nd % D
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/kern/src/stationary_cython.pyx":32
*
* @cython.cdivision(True)
* def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad): # <<<<<<<<<<<<<<
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__PYX_XDEC_MEMVIEW(&__pyx_v_X, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_X2, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_tmp, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_grad, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
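/* [annotation, not emitted by Cython] The OpenMP region above is the
   translation of `for nd in prange(N * D, nogil=True)`: the flat index nd
   is split into (n, d) and each grad[n,d] entry is reduced independently,
   so the iterations are trivially parallel. Below is a minimal serial
   reference sketch in plain C, assuming C-contiguous row-major arrays
   (X: N x D, X2: M x D, tmp: N x M, grad: N x D); the function name is
   hypothetical and exists only for illustration. */
static CYTHON_UNUSED void __pyx_editor_grad_X_reference(int N, int D, int M,
    const double *X, const double *X2, const double *tmp, double *grad)
{
    int n, d, m;
    for (n = 0; n < N; n++) {
        for (d = 0; d < D; d++) {
            double acc = 0.0;  /* grad[n,d] is reset before accumulation */
            for (m = 0; m < M; m++)
                acc += tmp[n * M + m] * (X[n * D + d] - X2[m * D + d]);
            grad[n * D + d] = acc;  /* grad[n,d] = sum_m tmp[n,m] * (X[n,d] - X2[m,d]) */
        }
    }
}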
/* "GPy/kern/src/stationary_cython.pyx":41
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d])
*
* def lengthscale_grads_in_c(int N, int M, int Q, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _tmp,
* np.ndarray[DTYPE_t, ndim=2] _X,
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_5lengthscale_grads_in_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4kern_3src_17stationary_cython_5lengthscale_grads_in_c = {"lengthscale_grads_in_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4kern_3src_17stationary_cython_5lengthscale_grads_in_c, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_5lengthscale_grads_in_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
int __pyx_v_N;
int __pyx_v_M;
int __pyx_v_Q;
PyArrayObject *__pyx_v__tmp = 0;
PyArrayObject *__pyx_v__X = 0;
PyArrayObject *__pyx_v__X2 = 0;
PyArrayObject *__pyx_v__grad = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lengthscale_grads_in_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_N,&__pyx_n_s_M,&__pyx_n_s_Q,&__pyx_n_s_tmp,&__pyx_n_s_X,&__pyx_n_s_X2,&__pyx_n_s_grad,0};
PyObject* values[7] = {0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_N)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, 1); __PYX_ERR(0, 41, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Q)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, 2); __PYX_ERR(0, 41, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tmp)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, 3); __PYX_ERR(0, 41, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, 4); __PYX_ERR(0, 41, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, 5); __PYX_ERR(0, 41, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_grad)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, 6); __PYX_ERR(0, 41, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lengthscale_grads_in_c") < 0)) __PYX_ERR(0, 41, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
}
__pyx_v_N = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error)
__pyx_v_M = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error)
__pyx_v_Q = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_Q == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L3_error)
__pyx_v__tmp = ((PyArrayObject *)values[3]);
__pyx_v__X = ((PyArrayObject *)values[4]);
__pyx_v__X2 = ((PyArrayObject *)values[5]);
__pyx_v__grad = ((PyArrayObject *)values[6]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("lengthscale_grads_in_c", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 41, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.kern.src.stationary_cython.lengthscale_grads_in_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__tmp), __pyx_ptype_5numpy_ndarray, 1, "_tmp", 0))) __PYX_ERR(0, 42, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__X), __pyx_ptype_5numpy_ndarray, 1, "_X", 0))) __PYX_ERR(0, 43, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__X2), __pyx_ptype_5numpy_ndarray, 1, "_X2", 0))) __PYX_ERR(0, 44, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v__grad), __pyx_ptype_5numpy_ndarray, 1, "_grad", 0))) __PYX_ERR(0, 45, __pyx_L1_error)
__pyx_r = __pyx_pf_3GPy_4kern_3src_17stationary_cython_4lengthscale_grads_in_c(__pyx_self, __pyx_v_N, __pyx_v_M, __pyx_v_Q, __pyx_v__tmp, __pyx_v__X, __pyx_v__X2, __pyx_v__grad);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
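/* [annotation, not emitted by Cython] lengthscale_grads_in_c mirrors grad_X
   above: the same argument-unpacking dance, but the buffer validation in the
   implementation below requests ndim=1 for _grad (presumably a Q-vector, one
   entry per lengthscale), and the GIL is again released around the external
   _lengthscale_grads call. */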
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_4lengthscale_grads_in_c(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_N, int __pyx_v_M, int __pyx_v_Q, PyArrayObject *__pyx_v__tmp, PyArrayObject *__pyx_v__X, PyArrayObject *__pyx_v__X2, PyArrayObject *__pyx_v__grad) {
double *__pyx_v_tmp;
double *__pyx_v_X;
double *__pyx_v_X2;
double *__pyx_v_grad;
__Pyx_LocalBuf_ND __pyx_pybuffernd__X;
__Pyx_Buffer __pyx_pybuffer__X;
__Pyx_LocalBuf_ND __pyx_pybuffernd__X2;
__Pyx_Buffer __pyx_pybuffer__X2;
__Pyx_LocalBuf_ND __pyx_pybuffernd__grad;
__Pyx_Buffer __pyx_pybuffer__grad;
__Pyx_LocalBuf_ND __pyx_pybuffernd__tmp;
__Pyx_Buffer __pyx_pybuffer__tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("lengthscale_grads_in_c", 0);
__pyx_pybuffer__tmp.pybuffer.buf = NULL;
__pyx_pybuffer__tmp.refcount = 0;
__pyx_pybuffernd__tmp.data = NULL;
__pyx_pybuffernd__tmp.rcbuffer = &__pyx_pybuffer__tmp;
__pyx_pybuffer__X.pybuffer.buf = NULL;
__pyx_pybuffer__X.refcount = 0;
__pyx_pybuffernd__X.data = NULL;
__pyx_pybuffernd__X.rcbuffer = &__pyx_pybuffer__X;
__pyx_pybuffer__X2.pybuffer.buf = NULL;
__pyx_pybuffer__X2.refcount = 0;
__pyx_pybuffernd__X2.data = NULL;
__pyx_pybuffernd__X2.rcbuffer = &__pyx_pybuffer__X2;
__pyx_pybuffer__grad.pybuffer.buf = NULL;
__pyx_pybuffer__grad.refcount = 0;
__pyx_pybuffernd__grad.data = NULL;
__pyx_pybuffernd__grad.rcbuffer = &__pyx_pybuffer__grad;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__tmp.rcbuffer->pybuffer, (PyObject*)__pyx_v__tmp, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 41, __pyx_L1_error)
}
__pyx_pybuffernd__tmp.diminfo[0].strides = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__tmp.diminfo[0].shape = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__tmp.diminfo[1].strides = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__tmp.diminfo[1].shape = __pyx_pybuffernd__tmp.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__X.rcbuffer->pybuffer, (PyObject*)__pyx_v__X, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 41, __pyx_L1_error)
}
__pyx_pybuffernd__X.diminfo[0].strides = __pyx_pybuffernd__X.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__X.diminfo[0].shape = __pyx_pybuffernd__X.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__X.diminfo[1].strides = __pyx_pybuffernd__X.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__X.diminfo[1].shape = __pyx_pybuffernd__X.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__X2.rcbuffer->pybuffer, (PyObject*)__pyx_v__X2, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 41, __pyx_L1_error)
}
__pyx_pybuffernd__X2.diminfo[0].strides = __pyx_pybuffernd__X2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__X2.diminfo[0].shape = __pyx_pybuffernd__X2.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__X2.diminfo[1].strides = __pyx_pybuffernd__X2.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__X2.diminfo[1].shape = __pyx_pybuffernd__X2.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__grad.rcbuffer->pybuffer, (PyObject*)__pyx_v__grad, &__Pyx_TypeInfo_nn___pyx_t_3GPy_4kern_3src_17stationary_cython_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 41, __pyx_L1_error)
}
__pyx_pybuffernd__grad.diminfo[0].strides = __pyx_pybuffernd__grad.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__grad.diminfo[0].shape = __pyx_pybuffernd__grad.rcbuffer->pybuffer.shape[0];
/* "GPy/kern/src/stationary_cython.pyx":46
* np.ndarray[DTYPE_t, ndim=2] _X2,
* np.ndarray[DTYPE_t, ndim=1] _grad):
* cdef double *tmp = <double*> _tmp.data # <<<<<<<<<<<<<<
* cdef double *X = <double*> _X.data
* cdef double *X2 = <double*> _X2.data
*/
__pyx_v_tmp = ((double *)__pyx_v__tmp->data);
/* "GPy/kern/src/stationary_cython.pyx":47
* np.ndarray[DTYPE_t, ndim=1] _grad):
* cdef double *tmp = <double*> _tmp.data
* cdef double *X = <double*> _X.data # <<<<<<<<<<<<<<
* cdef double *X2 = <double*> _X2.data
* cdef double *grad = <double*> _grad.data
*/
__pyx_v_X = ((double *)__pyx_v__X->data);
/* "GPy/kern/src/stationary_cython.pyx":48
* cdef double *tmp = <double*> _tmp.data
* cdef double *X = <double*> _X.data
* cdef double *X2 = <double*> _X2.data # <<<<<<<<<<<<<<
* cdef double *grad = <double*> _grad.data
* with nogil:
*/
__pyx_v_X2 = ((double *)__pyx_v__X2->data);
/* "GPy/kern/src/stationary_cython.pyx":49
* cdef double *X = <double*> _X.data
* cdef double *X2 = <double*> _X2.data
* cdef double *grad = <double*> _grad.data # <<<<<<<<<<<<<<
* with nogil:
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*/
__pyx_v_grad = ((double *)__pyx_v__grad->data);
/* "GPy/kern/src/stationary_cython.pyx":50
* cdef double *X2 = <double*> _X2.data
* cdef double *grad = <double*> _grad.data
* with nogil: # <<<<<<<<<<<<<<
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
/* "GPy/kern/src/stationary_cython.pyx":51
* cdef double *grad = <double*> _grad.data
* with nogil:
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place. # <<<<<<<<<<<<<<
*
* def lengthscale_grads(int N, int M, int Q, double[:,:] tmp, double[:,:] X, double[:,:] X2, double[:] grad):
*/
_lengthscale_grads(__pyx_v_N, __pyx_v_M, __pyx_v_Q, __pyx_v_tmp, __pyx_v_X, __pyx_v_X2, __pyx_v_grad);
}
/* "GPy/kern/src/stationary_cython.pyx":50
* cdef double *X2 = <double*> _X2.data
* cdef double *grad = <double*> _grad.data
* with nogil: # <<<<<<<<<<<<<<
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
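/* The WITH_THREAD block above is the expansion of "with nogil":
 * Py_UNBLOCK_THREADS saves the thread state into _save and releases the
 * GIL, the body runs GIL-free, and the finally path reacquires the GIL via
 * Py_BLOCK_THREADS before any Python C-API calls resume. */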
/* "GPy/kern/src/stationary_cython.pyx":41
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d])
*
* def lengthscale_grads_in_c(int N, int M, int Q, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _tmp,
* np.ndarray[DTYPE_t, ndim=2] _X,
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X2.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__grad.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__tmp.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("GPy.kern.src.stationary_cython.lengthscale_grads_in_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__X2.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__grad.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__tmp.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
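/* For reference, a minimal C sketch of the contract the external
 * _lengthscale_grads is assumed to satisfy here (inferred from the
 * pure-Cython fallback below, which computes the same quantity; the raw
 * ->data casts above additionally assume C-contiguous float64 inputs):
 *
 *     static void lengthscale_grads_ref(int N, int M, int Q,
 *                                       const double *tmp,  // (N, M)
 *                                       const double *X,    // (N, Q)
 *                                       const double *X2,   // (M, Q)
 *                                       double *grad)       // (Q,)
 *     {
 *         for (int q = 0; q < Q; q++) {
 *             double acc = 0.0;
 *             for (int n = 0; n < N; n++)
 *                 for (int m = 0; m < M; m++) {
 *                     double d = X[n*Q + q] - X2[m*Q + q];
 *                     acc += tmp[n*M + m] * d * d;
 *                 }
 *             grad[q] = acc;  // written in place, nothing returned
 *         }
 *     }
 */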
/* "GPy/kern/src/stationary_cython.pyx":53
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*
* def lengthscale_grads(int N, int M, int Q, double[:,:] tmp, double[:,:] X, double[:,:] X2, double[:] grad): # <<<<<<<<<<<<<<
* cdef int q, n, m
* cdef double gradq, dist
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_7lengthscale_grads(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4kern_3src_17stationary_cython_7lengthscale_grads = {"lengthscale_grads", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4kern_3src_17stationary_cython_7lengthscale_grads, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4kern_3src_17stationary_cython_7lengthscale_grads(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
int __pyx_v_N;
int __pyx_v_M;
int __pyx_v_Q;
__Pyx_memviewslice __pyx_v_tmp = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_X2 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_grad = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("lengthscale_grads (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_N,&__pyx_n_s_M,&__pyx_n_s_Q,&__pyx_n_s_tmp_2,&__pyx_n_s_X_2,&__pyx_n_s_X2_2,&__pyx_n_s_grad_2,0};
PyObject* values[7] = {0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_N)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, 1); __PYX_ERR(0, 53, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Q)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, 2); __PYX_ERR(0, 53, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tmp_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, 3); __PYX_ERR(0, 53, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, 4); __PYX_ERR(0, 53, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_X2_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, 5); __PYX_ERR(0, 53, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_grad_2)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, 6); __PYX_ERR(0, 53, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "lengthscale_grads") < 0)) __PYX_ERR(0, 53, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
}
__pyx_v_N = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_N == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_v_M = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_v_Q = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_Q == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_v_tmp = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_tmp.memview)) __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_v_X2 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_X2.memview)) __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_v_grad = __Pyx_PyObject_to_MemoryviewSlice_ds_double(values[6], PyBUF_WRITABLE); if (unlikely(!__pyx_v_grad.memview)) __PYX_ERR(0, 53, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("lengthscale_grads", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 53, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.kern.src.stationary_cython.lengthscale_grads", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4kern_3src_17stationary_cython_6lengthscale_grads(__pyx_self, __pyx_v_N, __pyx_v_M, __pyx_v_Q, __pyx_v_tmp, __pyx_v_X, __pyx_v_X2, __pyx_v_grad);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3GPy_4kern_3src_17stationary_cython_6lengthscale_grads(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_N, int __pyx_v_M, int __pyx_v_Q, __Pyx_memviewslice __pyx_v_tmp, __Pyx_memviewslice __pyx_v_X, __Pyx_memviewslice __pyx_v_X2, __Pyx_memviewslice __pyx_v_grad) {
int __pyx_v_q;
int __pyx_v_n;
int __pyx_v_m;
double __pyx_v_dist;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
Py_ssize_t __pyx_t_13;
__Pyx_RefNannySetupContext("lengthscale_grads", 0);
/* "GPy/kern/src/stationary_cython.pyx":56
* cdef int q, n, m
* cdef double gradq, dist
* with nogil: # <<<<<<<<<<<<<<
* for q in range(Q):
* grad[q] = 0.0
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
/* "GPy/kern/src/stationary_cython.pyx":57
* cdef double gradq, dist
* with nogil:
* for q in range(Q): # <<<<<<<<<<<<<<
* grad[q] = 0.0
* for n in range(N):
*/
__pyx_t_1 = __pyx_v_Q;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_q = __pyx_t_3;
/* "GPy/kern/src/stationary_cython.pyx":58
* with nogil:
* for q in range(Q):
* grad[q] = 0.0 # <<<<<<<<<<<<<<
* for n in range(N):
* for m in range(M):
*/
__pyx_t_4 = __pyx_v_q;
*((double *) ( /* dim=0 */ (__pyx_v_grad.data + __pyx_t_4 * __pyx_v_grad.strides[0]) )) = 0.0;
/* "GPy/kern/src/stationary_cython.pyx":59
* for q in range(Q):
* grad[q] = 0.0
* for n in range(N): # <<<<<<<<<<<<<<
* for m in range(M):
* dist = X[n,q] - X2[m,q]
*/
__pyx_t_5 = __pyx_v_N;
__pyx_t_6 = __pyx_t_5;
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_n = __pyx_t_7;
/* "GPy/kern/src/stationary_cython.pyx":60
* grad[q] = 0.0
* for n in range(N):
* for m in range(M): # <<<<<<<<<<<<<<
* dist = X[n,q] - X2[m,q]
* grad[q] += tmp[n, m] * dist * dist
*/
__pyx_t_8 = __pyx_v_M;
__pyx_t_9 = __pyx_t_8;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_m = __pyx_t_10;
/* "GPy/kern/src/stationary_cython.pyx":61
* for n in range(N):
* for m in range(M):
* dist = X[n,q] - X2[m,q] # <<<<<<<<<<<<<<
* grad[q] += tmp[n, m] * dist * dist
*/
__pyx_t_4 = __pyx_v_n;
__pyx_t_11 = __pyx_v_q;
__pyx_t_12 = __pyx_v_m;
__pyx_t_13 = __pyx_v_q;
__pyx_v_dist = ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X.data + __pyx_t_4 * __pyx_v_X.strides[0]) ) + __pyx_t_11 * __pyx_v_X.strides[1]) ))) - (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_X2.data + __pyx_t_12 * __pyx_v_X2.strides[0]) ) + __pyx_t_13 * __pyx_v_X2.strides[1]) ))));
/* "GPy/kern/src/stationary_cython.pyx":62
* for m in range(M):
* dist = X[n,q] - X2[m,q]
* grad[q] += tmp[n, m] * dist * dist # <<<<<<<<<<<<<<
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_12 = __pyx_v_m;
__pyx_t_11 = __pyx_v_q;
*((double *) ( /* dim=0 */ (__pyx_v_grad.data + __pyx_t_11 * __pyx_v_grad.strides[0]) )) += (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tmp.data + __pyx_t_13 * __pyx_v_tmp.strides[0]) ) + __pyx_t_12 * __pyx_v_tmp.strides[1]) ))) * __pyx_v_dist) * __pyx_v_dist);
}
}
}
}
/* "GPy/kern/src/stationary_cython.pyx":56
* cdef int q, n, m
* cdef double gradq, dist
* with nogil: # <<<<<<<<<<<<<<
* for q in range(Q):
* grad[q] = 0.0
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/kern/src/stationary_cython.pyx":53
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*
* def lengthscale_grads(int N, int M, int Q, double[:,:] tmp, double[:,:] X, double[:,:] X2, double[:] grad): # <<<<<<<<<<<<<<
* cdef int q, n, m
* cdef double gradq, dist
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__PYX_XDEC_MEMVIEW(&__pyx_v_tmp, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_X, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_X2, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_grad, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
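/* Usage sketch for the fallback above (module path as recorded in the
 * traceback strings; shapes inferred from the loop bounds):
 *
 *     from GPy.kern.src.stationary_cython import lengthscale_grads
 *     lengthscale_grads(N, M, Q, tmp, X, X2, grad)
 *
 * with tmp of shape (N, M), X of shape (N, Q), X2 of shape (M, Q) and grad
 * a length-Q float64 array filled in place; all four must be writable,
 * since the memoryview conversions above request PyBUF_WRITABLE. */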
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":735
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":736
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 736, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":735
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":738
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":739
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 739, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":738
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":741
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":742
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 742, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":741
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":744
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":745
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 745, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":744
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":747
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":748
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 748, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":747
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
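/* The PyArray_MultiIterNew1..5 inlines above are thin wrappers over
 * numpy's variadic PyArray_MultiIterNew, which builds a broadcasting
 * iterator across its operands, e.g.
 *
 *     PyObject *it = PyArray_MultiIterNew(2, (void *)a, (void *)b);
 *
 * followed by the usual PyArray_MultiIter_NEXT / PyArray_MultiIter_DATA
 * loop. */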
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":750
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":751
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":752
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":751
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":754
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":750
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":931
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":932
*
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
* PyArray_SetBaseObject(arr, base)
*
*/
Py_INCREF(__pyx_v_base);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":933
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
(void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":931
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
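/* The Py_INCREF above is required because PyArray_SetBaseObject steals a
 * reference to base; the (void) cast discards its int return, matching the
 * void signature of this inline helper. */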
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":935
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_v_base;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":936
*
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
* if base is NULL:
* return None
*/
__pyx_v_base = PyArray_BASE(__pyx_v_arr);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":937
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
__pyx_t_1 = ((__pyx_v_base == NULL) != 0);
if (__pyx_t_1) {
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":938
* base = PyArray_BASE(arr)
* if base is NULL:
* return None # <<<<<<<<<<<<<<
* return <object>base
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":937
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":939
* if base is NULL:
* return None
* return <object>base # <<<<<<<<<<<<<<
*
* # Versions of the import_* functions which are more suitable for
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_base));
__pyx_r = ((PyObject *)__pyx_v_base);
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":935
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
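/* PyArray_BASE returns a borrowed pointer, so the <object> cast above adds
 * a new reference before returning; a NULL base (the array owns its own
 * data) maps to None. */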
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":943
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":944
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":945
* cdef inline int import_array() except -1:
* try:
* __pyx_import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 945, __pyx_L3_error)
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":944
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":946
* try:
* __pyx_import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 946, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":947
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 947, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 947, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":944
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":943
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
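/* The try/except wrapper above converts a failing _import_array() (which
 * returns -1 with a Python error set) into an ImportError; module
 * initialization must call this once before any other numpy C-API use. */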
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":949
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":950
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":951
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 951, __pyx_L3_error)
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":950
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":952
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 952, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":953
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 953, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 953, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":950
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":949
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":955
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":956
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":957
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L3_error)
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":956
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":958
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 958, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":959
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef extern from *:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 959, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 959, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":956
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":955
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
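/* Note that import_ufunc reuses _import_umath (as declared in the pxd
 * source echoed above): the ufunc C-API tables live in numpy's umath
 * module, so one initializer serves both import_umath and import_ufunc. */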
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":969
*
*
* cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.timedelta64)`
*/
static CYTHON_INLINE int __pyx_f_5numpy_is_timedelta64_object(PyObject *__pyx_v_obj) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_timedelta64_object", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":981
* bool
* """
* return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyTimedeltaArrType_Type));
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":969
*
*
* cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.timedelta64)`
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":984
*
*
* cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.datetime64)`
*/
static CYTHON_INLINE int __pyx_f_5numpy_is_datetime64_object(PyObject *__pyx_v_obj) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_datetime64_object", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":996
* bool
* """
* return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyDatetimeArrType_Type));
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":984
*
*
* cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<<
* """
* Cython equivalent of `isinstance(obj, np.datetime64)`
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":999
*
*
* cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy datetime64 object
*/
static CYTHON_INLINE npy_datetime __pyx_f_5numpy_get_datetime64_value(PyObject *__pyx_v_obj) {
npy_datetime __pyx_r;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1006
* also needed. That can be found using `get_datetime64_unit`.
* """
* return (<PyDatetimeScalarObject*>obj).obval # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((PyDatetimeScalarObject *)__pyx_v_obj)->obval;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":999
*
*
* cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy datetime64 object
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1009
*
*
* cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy timedelta64 object
*/
static CYTHON_INLINE npy_timedelta __pyx_f_5numpy_get_timedelta64_value(PyObject *__pyx_v_obj) {
npy_timedelta __pyx_r;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1013
* returns the int64 value underlying scalar numpy timedelta64 object
* """
* return (<PyTimedeltaScalarObject*>obj).obval # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((PyTimedeltaScalarObject *)__pyx_v_obj)->obval;
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1009
*
*
* cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the int64 value underlying scalar numpy timedelta64 object
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1016
*
*
* cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the unit part of the dtype for a numpy datetime64 object.
*/
static CYTHON_INLINE NPY_DATETIMEUNIT __pyx_f_5numpy_get_datetime64_unit(PyObject *__pyx_v_obj) {
NPY_DATETIMEUNIT __pyx_r;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1020
* returns the unit part of the dtype for a numpy datetime64 object.
* """
* return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base # <<<<<<<<<<<<<<
*/
__pyx_r = ((NPY_DATETIMEUNIT)((PyDatetimeScalarObject *)__pyx_v_obj)->obmeta.base);
goto __pyx_L0;
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":1016
*
*
* cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<<
* """
* returns the unit part of the dtype for a numpy datetime64 object.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(2, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(2, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(2, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(2, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
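/* Argument-unpacking pattern for __cinit__: positional arguments fill
 * values[] through the fall-through switch, keywords are matched against
 * __pyx_pyargnames, and the defaults (mode="c", allocate_buffer=True) are
 * applied only to slots still empty before dispatching to the typed body
 * below. */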
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(2, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(2, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(2, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(2, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(2, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(2, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(2, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(2, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(2, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(2, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(2, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
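/* A minimal Cython-level usage sketch for the __cinit__ above, following the
 * documented cython.view.array interface (names illustrative, not part of
 * this generated module):
 *
 *     from cython.view cimport array as cvarray
 *
 *     # 3x3 C-contiguous buffer of C ints; shape entries must be > 0,
 *     # str formats are encoded to ASCII bytes, and format "O" buffers
 *     # additionally get every slot preset to None.
 *     cyarr = cvarray(shape=(3, 3), itemsize=sizeof(int), format="i", mode="c")
 */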
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
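/* Consumer-side sketch of the __getbuffer__ slot above (assumption: plain
 * CPython buffer API, not part of this generated module):
 *
 *     Py_buffer view;
 *     // flags must carry a contiguity bit compatible with self.mode,
 *     // otherwise the slot raises the ValueError seen above
 *     if (PyObject_GetBuffer(obj, &view, PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) == 0) {
 *         // view.buf, view.shape, view.strides now alias self.data/_shape/_strides
 *         PyBuffer_Release(&view);
 *     }
 */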
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
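/* NOTE: __dealloc__ mirrors the allocation scheme in __cinit__ and
 * array_cwrapper: self.data is released via callback_free_data when one was
 * installed, otherwise via free() only when free_data is set (i.e. the
 * buffer was malloc'ed here rather than wrapped), with object buffers
 * DECREF'ed first through refcount_objects_in_slice; the combined
 * _shape/_strides block always goes back through PyObject_Free(). */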
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(2, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
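/* The __getattr__/__getitem__/__setitem__ slots above all delegate to the
 * memview property, so at the Python level (illustrative names, assuming a
 * 2-D "i"-format array cyarr):
 *
 *     cyarr[1, 2] = 7       # __setitem__  -> self.memview[item] = value
 *     x = cyarr[1, 2]       # __getitem__  -> self.memview[item]
 *     t = cyarr.T           # __getattr__  -> getattr(self.memview, "T")
 */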
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
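/* NOTE: both tree-fragment methods above deliberately block pickling of
 * array: __cinit__ allocates raw C memory that a default __reduce__ could
 * not reconstruct, hence the unconditional TypeError. */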
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(2, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
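/* array_cwrapper ownership sketch (restating the embedded source with the
 * consequence spelled out): when buf is non-NULL the Python-level allocation
 * is skipped and the array merely aliases the caller's memory --
 *
 *     result = array(shape, itemsize, format, mode.decode('ASCII'),
 *                    allocate_buffer=False)
 *     result.data = buf    # free_data stays False, so __dealloc__ will not
 *                          # free(buf); the caller keeps ownership unless it
 *                          # installs callback_free_data
 */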
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(2, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
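/* NOTE: Enum is a tiny named-sentinel helper -- module-level instances such
 * as generic = Enum("<strided and direct or indirect>") (see the embedded
 * source above) are compared by identity to tag memoryview access kinds, and
 * __repr__ simply hands back that descriptive name. */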
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
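/* NOTE: the __reduce_cython__ above implements pickling for Enum; 0xb068931
 * is the checksum Cython derives from the class's attribute layout, and it
 * is re-verified by __pyx_unpickle_Enum on load, so a round trip such as
 * pickle.loads(pickle.dumps(enum_obj)) reconstructs the instance through
 * (__pyx_unpickle_Enum, (type, 0xb068931, state)). */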
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
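/* __setstate_cython__ above is the receiving half of that protocol: it
 * type-checks __pyx_state as a tuple (or None) and delegates to
 * __pyx_unpickle_Enum__set_state, which restores the object's attributes
 * from the tuple. */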
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
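/* align_pointer above rounds an address up to the next multiple of
 * `alignment` with the modulo idiom: offset = p % alignment; if the
 * offset is non-zero, advance by (alignment - offset). A minimal
 * stand-alone sketch of the same idiom follows; `example_align_up` is
 * illustrative only (not part of the generated module) and, like the
 * function above, assumes a non-zero alignment. */
static void *example_align_up(void *p, size_t alignment) {
    Py_intptr_t v = (Py_intptr_t) p;                 /* reinterpret as integer */
    Py_intptr_t rem = v % (Py_intptr_t) alignment;   /* distance past boundary */
    if (rem > 0)
        v += ((Py_intptr_t) alignment - rem);        /* round up to boundary */
    return (void *) v;
}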
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(2, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
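/* The wrapper above performs Cython's standard argument unpacking for
 * __cinit__(obj, flags, dtype_is_object=False): positional arguments are
 * read off the tuple through the fall-through switch, keyword arguments
 * fill the remaining slots, `obj` and `flags` are required (arity errors
 * go through __Pyx_RaiseArgtupleInvalid), and dtype_is_object defaults
 * to 0 when omitted. */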
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(2, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
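/* Summary of __cinit__ above: the buffer is acquired via __Pyx_GetBuffer
 * whenever the concrete type is memoryview itself or an exporter object
 * was passed; if the exporter left view.obj as NULL it is normalized to
 * Py_None (with a matching incref) so release stays symmetric. Each
 * memoryview also gets a thread lock: one of the 8 preallocated entries
 * in __pyx_memoryview_thread_locks when available (THREAD_LOCKS_PREALLOCATED
 * is folded to the constant 8 here), otherwise a fresh
 * PyThread_allocate_lock(), with MemoryError raised on failure. Finally,
 * dtype_is_object is inferred from a format string of exactly "O" when
 * PyBUF_FORMAT was requested, else taken from the argument. */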
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
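/* __dealloc__ above returns a pooled lock with a swap-with-last removal:
 * the used count shrinks by one and, unless the freed slot is already the
 * last live one, the last live entry is swapped into it, leaving this
 * lock parked just past the live region for reuse by the next __cinit__.
 * Locks that did not come from the pool are freed instead. A condensed
 * sketch of the removal step; `example_pool_remove` is illustrative only,
 * not part of the generated module. */
static void example_pool_remove(void **pool, int *used, int i) {
    (*used)--;                        /* shrink the live region */
    if (i != *used) {                 /* swap last live entry into the hole */
        void *tmp = pool[*used];
        pool[*used] = pool[i];
        pool[i] = tmp;
    }
}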
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(2, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(2, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
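/* get_item_pointer above walks the index tuple one dimension at a time,
 * letting pybuffer_index handle wraparound of negative indices, bounds
 * checks and suboffset (indirect) dimensions. For a direct, already
 * validated index the walk reduces to the usual strided address
 * computation sketched below; `example_item_addr` is illustrative only
 * and assumes non-negative, in-bounds indices. */
static char *example_item_addr(char *buf, const Py_ssize_t *strides,
                               const Py_ssize_t *idx, int ndim) {
    int d;
    for (d = 0; d < ndim; d++)
        buf += idx[d] * strides[d];   /* advance by one stride per dimension */
    return buf;
}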
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(2, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(2, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
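/* __getitem__ above dispatches on the index shape: a bare Ellipsis
 * returns self unchanged; otherwise _unellipsify expands `...` into
 * explicit slices and reports whether any slice is present. Slice
 * indices produce a new memoryview through memview_slice, while a full
 * tuple of integers resolves to a raw item pointer that
 * convert_item_to_object turns into a Python value. */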
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(2, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(2, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(2, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
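/* __setitem__ above first rejects writes to read-only views with a
 * TypeError, then normalizes the index via _unellipsify. Slice targets
 * are assigned either memoryview-to-memoryview (setitem_slice_assignment,
 * when is_slice can coerce the value to a buffer) or by broadcasting a
 * scalar (setitem_slice_assign_scalar); plain integer indices go through
 * setitem_indexed. */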
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(2, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
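/* is_slice above coerces any buffer-exporting object to a memoryview,
 * requesting the current flags with writability stripped and any
 * contiguity accepted (flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS).
 * A TypeError from the coercion is swallowed and mapped to None, which
 * tells __setitem__ to fall back to scalar assignment. */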
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(2, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(2, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
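/* setitem_slice_assignment above extracts a __Pyx_memviewslice from both
 * the source and destination memoryviews and delegates the element-wise
 * copy to memoryview_copy_contents, passing both ndims and
 * dtype_is_object so object elements are reference-counted correctly. */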
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(2, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
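/* Note on the function above: the labels __pyx_L6_error / __pyx_L7 are
 * Cython's translation of a Python try/finally. PyMem_Free(tmp) runs on
 * both the normal path and the exception path; on the exception path the
 * pending error (type/value/traceback plus line info) is stashed in
 * __pyx_t_7..12, the finally body executes, and the error state is
 * restored before jumping to __pyx_L1_error. An illustrative Python-level
 * sketch of the source pattern (not the exact View.MemoryView code):
 *
 *     tmp = PyMem_Malloc(...)
 *     try:
 *         ...  # write the item, then slice_assign_scalar(...)
 *     finally:
 *         PyMem_Free(tmp)
 */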
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(2, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
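/* setitem_indexed (above) is the scalar-assignment path: it resolves the
 * index to a raw char* via the vtable call get_item_pointer, then
 * delegates the actual store to assign_item_from_object. Both calls
 * propagate failure through the usual __PYX_ERR / __pyx_L1_error
 * machinery, so a bad index or an unconvertible value surfaces as a
 * normal Python exception with a traceback. */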
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp, __pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(2, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(2, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
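/* convert_item_to_object (above) is the slow, format-driven fallback used
 * when Cython cannot convert the element type natively. Its Python-level
 * logic, as recorded in the interleaved source comments:
 *
 *     import struct
 *     bytesitem = itemp[:self.view.itemsize]
 *     try:
 *         result = struct.unpack(self.view.format, bytesitem)
 *     except struct.error:
 *         raise ValueError("Unable to convert item to object")
 *     else:
 *         return result[0] if len(self.view.format) == 1 else result
 *
 * struct.unpack always returns a tuple, so a single-character format
 * string (e.g. "i") is unwrapped to the scalar result[0], while
 * multi-field formats are returned as the tuple itself. */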
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(2, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
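/* assign_item_from_object (above) is the inverse of convert_item_to_object:
 * it packs the Python value with struct.pack and copies the resulting
 * bytes into the element. Two compilation details are visible above:
 * struct.pack(self.view.format, *value) is built as the tuple
 * (format,) + tuple(value) (the PyNumber_Add of two tuples), and the
 * Python loop "for i, c in enumerate(bytesvalue): itemp[i] = c" is
 * compiled down to a raw pointer walk over PyBytes_AS_STRING with a
 * manually incremented counter (__pyx_t_9). */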
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
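/* __getbuffer__ (above) implements the exporter side of PEP 3118 for the
 * memoryview type: each request flag (PyBUF_ND, PyBUF_STRIDES,
 * PyBUF_INDIRECT, PyBUF_FORMAT) gates whether the corresponding field of
 * the exporter's own view is re-exported or set to NULL, a writable
 * request on a read-only view raises ValueError, and info->obj takes a
 * new strong reference to self so the exporter outlives the exported
 * buffer. A minimal consumer-side sketch, using only documented C-API
 * calls (illustrative, not part of this module):
 *
 *     Py_buffer view;
 *     if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) == 0) {
 *         ... use view.buf / view.shape / view.strides ...
 *         PyBuffer_Release(&view);
 *     }
 */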
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(2, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
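/* The T property (above) returns a transposed view: memoryview_copy
 * produces a fresh _memoryviewslice, and transpose_memslice
 * (__pyx_memslice_transpose) reverses the axis order of its
 * shape/strides in place. Failure of either step is reported through
 * __pyx_L1_error as usual. */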
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(2, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
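/* suboffsets (above): when the underlying view carries no suboffsets
 * array, the property synthesizes (-1,) * ndim. In PEP 3118 a negative
 * suboffset means "no pointer dereference in this dimension", so the
 * all-(-1) tuple advertises a fully direct buffer. Otherwise the real
 * array is materialized as a tuple, exactly like shape and strides
 * above. */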
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
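/* nbytes (above) evaluates self.size * self.view.itemsize. Note that
 * self.size is fetched via attribute lookup (__Pyx_PyObject_GetAttrStr),
 * i.e. it goes through the size property below and therefore triggers
 * (and benefits from) the lazy _size cache. */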
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
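/* size (above) is computed lazily: the first access multiplies out
 * self.view.shape[:ndim] as Python integers (so very large buffers
 * cannot overflow a fixed-width C type) and caches the product in
 * self._size; subsequent accesses simply return the cached object. */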
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
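/* __len__ (above) follows the usual sequence convention: the extent of
 * the first dimension when ndim >= 1, and 0 for a zero-dimensional
 * view. There is no error path here, so len() on a memoryview cannot
 * raise; it simply reports 0 for the degenerate case. */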
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
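/* __str__ follows the same pattern as __repr__ but formats only the class
 * name of the wrapped base object, via a 1-tuple and the
 * "<MemoryView of %r object>" format string, so the object identity (id)
 * is omitted. */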
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
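/* is_c_contig: get_slice_from_memview() either returns a pointer to the
 * slice already embedded in a _memoryviewslice or fills the stack-allocated
 * tmp via slice_copy(); __pyx_memviewslice_is_contig() then walks the
 * strides in 'C' (row-major) order over self.view.ndim dimensions, and the
 * int result is boxed into a Python bool. */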
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
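/* is_f_contig is structurally identical to is_c_contig above except that
 * the contiguity check runs in 'F' (Fortran, column-major) order. */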
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
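/* copy(): masks PyBUF_F_CONTIGUOUS out of the view flags, snapshots the
 * current slice with slice_copy(), then lets copy_new_contig allocate a
 * C-contiguous ("c" order) buffer and copy the data into it before the new
 * slice is rewrapped as a Python-level memoryview.
 *
 * Illustrative Python-level effect (sketch, variable names assumed):
 *     mv2 = mv.copy()      # mv2 is C-contiguous and owns its own buffer
 *     mv2.is_c_contig()    # -> True, regardless of mv's original layout
 */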
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
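/* copy_fortran() mirrors copy() with the contiguity flags swapped:
 * PyBUF_C_CONTIGUOUS is masked out, the copy is made in "fortran" order
 * with PyBUF_F_CONTIGUOUS requested, and the resulting slice is returned
 * as a new memoryview. */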
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
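/* __reduce_cython__ / __setstate_cython__: both pickling hooks
 * unconditionally raise TypeError("no default __reduce__ due to
 * non-trivial __cinit__") -- a memoryview wraps a live Py_buffer and raw C
 * pointers, so there is no meaningful default serialization. */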
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
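/* memoryview_cwrapper (__pyx_memoryview_new) is the C-level constructor
 * used by the rest of the module: it boxes flags and dtype_is_object,
 * calls the memoryview type with (o, flags, dtype_is_object), and then
 * stores the __Pyx_TypeInfo pointer directly on the new object --
 * presumably because a raw C pointer cannot travel through the
 * Python-level constructor arguments. */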
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
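/* memoryview_check is an inlined isinstance(o, memoryview): it reduces to
 * a single __Pyx_TypeCheck against __pyx_memoryview_type, so no Python
 * call or reference-count traffic is needed. */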
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(2, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(2, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__17);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__17); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(2, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(2, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
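/* _unellipsify normalizes an index expression against an ndim-dimensional
 * view: a non-tuple index is wrapped in a 1-tuple, the first Ellipsis
 * expands to enough full slices to pad the result out to ndim entries,
 * any later Ellipsis degrades to a single slice(None), items that are
 * neither slices nor integer-like raise TypeError, and missing trailing
 * dimensions are padded with full slices. The return value is the pair
 * (have_slices or nslices, tuple(result)).
 *
 * Worked Python-level examples (sketch, assuming ndim == 3):
 *     _unellipsify((0, Ellipsis), 3)  # -> (True, (0, slice(None), slice(None)))
 *     _unellipsify(0, 3)              # -> (2,    (0, slice(None), slice(None)))
 */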
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(2, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
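/* assert_direct_dimensions scans the first ndim suboffsets; a value >= 0
 * marks an indirect (pointer-chasing, PIL-style) dimension, which the
 * slicing code below does not support, so it raises
 * ValueError("Indirect dimensions not supported"). */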
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(2, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(2, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
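/* memview_slice drives the slicing path of __getitem__: dst is zeroed,
 * p_src points either at the from_slice embedded in a _memoryviewslice or
 * at a fresh slice_copy() snapshot, and each entry of the index tuple
 * (normalized earlier by _unellipsify in the caller) is folded into dst --
 * an integer index drops a dimension, None injects a new length-1
 * dimension with stride 0, and a slice object is decomposed into
 * start/stop/step plus have_* flags before slice_memviewslice() clamps and
 * applies it. The finished slice is rewrapped via memoryview_fromslice,
 * propagating to_object_func / to_dtype_func when the source was a
 * _memoryviewslice. */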
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
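/* pybuffer_index: compute the address of element `index` along dimension
 * `dim` of a Py_buffer. Handles 0-dim buffers (shape = len / itemsize),
 * wraps negative indices, raises IndexError when the index is out of
 * bounds, and follows one level of indirection when the dimension carries a
 * suboffset >= 0. Returns NULL with an exception set on error.
 */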
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(2, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(2, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
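/* transpose_memslice: reverse the shape and strides arrays of *memslice in
 * place, swapping entry i with entry ndim-1-i for i in range(ndim / 2); for
 * odd ndim the middle axis stays put. The transpose fails with a ValueError
 * if either dimension of a swapped pair is indirect (suboffset >= 0).
 * Runs without the GIL; returns 1 on success and 0 on error.
 */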
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
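/* _memoryviewslice.__dealloc__: drop the reference held on the wrapped
 * from_slice when the slice object is destroyed.
 */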
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
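/* _memoryviewslice.convert_item_to_object: convert the raw item at itemp
 * into a Python object, using the slice's specialized to_object_func when
 * one was registered and falling back to the generic memoryview conversion
 * otherwise.
 */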
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
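/* _memoryviewslice.assign_item_from_object: write a Python value into the
 * raw item at itemp through the specialized to_dtype_func when available,
 * otherwise via the generic memoryview assignment path.
 */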
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
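/* _memoryviewslice.base: property getter returning from_object, the object
 * the originating memoryview was constructed from.
 */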
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
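/* Pickling stubs: _memoryviewslice has a non-trivial __cinit__, so both
 * __reduce_cython__ and __setstate_cython__ below simply raise TypeError.
 */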
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
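/* memoryview_fromslice: wrap a __Pyx_memviewslice in a new _memoryviewslice
 * object. Copies the slice and takes a memoryview reference on it, rebuilds
 * the Py_buffer view (buf, ndim, shape, strides, len) from the slice fields,
 * exposes suboffsets only when at least one dimension is indirect, selects
 * PyBUF_RECORDS or PyBUF_RECORDS_RO depending on the source's writability,
 * and installs the dtype conversion callbacks. Returns None when the slice
 * carries no memoryview.
 */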
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
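/* get_slice_from_memview: if memview is already a _memoryviewslice, return
 * a pointer to its embedded from_slice; otherwise fill *mslice via
 * slice_copy and return mslice. Returns NULL with an exception set on error.
 */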
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
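/* slice_copy: flatten a memoryview's Py_buffer description into the
 * __Pyx_memviewslice *dst, copying shape and strides per dimension and
 * substituting -1 for each suboffset when the buffer exports none.
 */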
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
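/* memoryview_copy: snapshot memview into a stack slice with slice_copy,
 * then build a new memoryview object from it via
 * memoryview_copy_from_slice.
 */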
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
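/* __pyx_memoryview_copy_object_from_slice: wraps an existing
 * __Pyx_memviewslice in a new memoryview object. When the source is a
 * _memoryviewslice, its to_object_func/to_dtype_func converters are
 * propagated; otherwise NULL converters are used. Construction itself is
 * delegated to __pyx_memoryview_fromslice(). */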
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
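/* abs_py_ssize_t: branch-based absolute value for Py_ssize_t, generated
 * from a `nogil` cdef so it is safe to call without the GIL. */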
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
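/* __pyx_get_best_slice_order ("get_best_order"): c_stride is the stride of
 * the innermost dimension with extent > 1, f_stride that of the outermost
 * such dimension. Returns 'C' when |c_stride| <= |f_stride| and 'F'
 * otherwise, i.e. the contiguity order a copy of this slice should use. */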
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
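/* _copy_strided_to_strided: recursive element-wise copy between two strided
 * buffers. The 1-D base case collapses into a single memcpy when both
 * strides are positive and equal to the itemsize (contiguous data);
 * otherwise each item is memcpy'd individually while the data pointers
 * advance by their strides. Higher dimensions recurse with shape/strides
 * shifted by one. */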
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
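/* copy_strided_to_strided: thin wrapper that unpacks data/strides/shape
 * from the two memviewslices and forwards to _copy_strided_to_strided(). */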
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
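/* __pyx_memoryview_slice_get_size ("slice_get_size"): byte size of the
 * slice, i.e. the itemsize multiplied by the product of the first `ndim`
 * extents. */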
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
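/* __pyx_fill_contig_strides_array ("fill_contig_strides_array"): fills
 * `strides` with contiguous strides for `shape`, accumulating from the
 * initial `stride` (normally the itemsize) in ascending dimension order for
 * 'F' and descending order for 'C'. The return value is the fully
 * accumulated stride, i.e. the total buffer size in bytes. */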
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
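/* __pyx_memoryview_copy_data_to_temp ("copy_data_to_temp"): copies `src`
 * into a freshly malloc'ed contiguous buffer described by `tmpslice`
 * (contiguous strides via fill_contig_strides_array, suboffsets reset to
 * -1, strides of extent-1 dimensions zeroed for broadcasting). A single
 * memcpy is used when `src` is already contiguous in `order`, a strided
 * copy otherwise. On allocation failure a MemoryError is raised and NULL is
 * returned; on success the caller owns the buffer and must free() it. */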
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
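/* __pyx_memoryview_err_extents ("_err_extents"): error helper that acquires
 * the GIL, raises ValueError reporting the mismatched extents of dimension
 * `i`, and returns -1 through the exception path. */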
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(2, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
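/* __pyx_memoryview_err_dim ("_err_dim"): error helper that acquires the
 * GIL, decodes `msg` as ASCII, %-formats it with the dimension number, and
 * raises the given exception type; returns -1. */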
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
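/* __pyx_memoryview_err ("_err"): error helper that acquires the GIL and
 * raises `error` with the ASCII-decoded `msg` when one is supplied,
 * otherwise raises the `error` object itself; returns -1. */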
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(2, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(2, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
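/* __pyx_memoryview_copy_contents ("memoryview_copy_contents"): core copy
 * between two memviewslices. Leading dimensions are broadcast so both sides
 * have max(src_ndim, dst_ndim) dims; extents must match unless the source
 * extent is 1, in which case its stride is zeroed. Indirect (suboffset)
 * dimensions are rejected. Overlapping memory routes the source through a
 * contiguous temporary (copy_data_to_temp). Without broadcasting, matching
 * contiguity on both sides allows one memcpy; otherwise 'F'-ordered slices
 * are transposed and the data is copied element-wise, with object refcounts
 * adjusted around the raw copy when dtype_is_object. Returns 0 on success,
 * -1 after raising. */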
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(2, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
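/* __pyx_memoryview_broadcast_leading ("broadcast_leading"): shifts the
 * slice's dimensions toward the back and fills the `ndim_other - ndim`
 * leading dimensions with extent 1, the stride from strides[0], and
 * suboffset -1, so the slice can broadcast against a higher-dimensional
 * one. */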
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
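/* __pyx_memoryview_refcount_copying ("refcount_copying"): when the element
 * type is a Python object, adjusts the refcount of every object in `dst`
 * (inc=0 before the buffer is overwritten, inc=1 afterwards); a no-op for
 * plain C dtypes. */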
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
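/* refcount_objects_in_slice_with_gil: GIL-acquiring entry point for
 * refcount_objects_in_slice(), callable from nogil copy code. */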
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
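/* Note: refcount_objects_in_slice recursively walks an ndim-dimensional
 * slice. At ndim == 1 each item is reinterpreted as a PyObject* and
 * INCREF'd or DECREF'd according to 'inc'; outer dimensions recurse with
 * shape+1/strides+1, advancing 'data' by strides[0] per iteration. */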
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
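/* Note: slice_assign_scalar fills every element of 'dst' with the
 * 'itemsize' bytes at 'item'. Object refcounts are dropped first (the old
 * pointers are about to be overwritten), the raw fill runs, and refcounts
 * are then re-acquired for the newly written pointers. */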
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
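/* Note: _slice_assign_scalar performs the recursive raw fill. At ndim == 1
 * it memcpy's 'item' into each element, stepping by stride; for higher
 * dimensions it recurses one axis at a time with shape+1/strides+1. */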
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
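/* Note: __pyx_unpickle_Enum provides module-level pickle support for the
 * Enum sentinel class. The checksum (0xb068931) guards against unpickling
 * state produced by an incompatible class layout; on a match, a new Enum
 * is created via __new__ and its state restored through
 * __pyx_unpickle_Enum__set_state. */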
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(2, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(2, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(2, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(2, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
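/* Note: __pyx_unpickle_Enum__set_state restores 'name' from state[0] and,
 * when the state tuple has a second element and the instance exposes a
 * __dict__, merges state[1] into it via dict.update(). */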
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(2, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(2, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(2, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(2, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
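/* Note: what follows is the type-object boilerplate for the 'array'
 * (cython.array) extension type. tp_new wires the vtable and initializes
 * 'mode'/'_format' to None before calling __cinit__; tp_dealloc runs any
 * tp_finalize first, then invokes __dealloc__ with the refcount
 * temporarily raised and any pending exception saved/restored, so
 * finalization cannot re-enter deallocation or clobber an in-flight
 * error. */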
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.kern.src.stationary_cython.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
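/* Note: type-object boilerplate for the 'Enum' sentinel type. It is
 * GC-tracked with a single 'name' slot, so tp_traverse visits that slot
 * and tp_clear swaps it for None rather than leaving a dangling pointer. */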
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.kern.src.stationary_cython.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
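/* Note: type-object boilerplate for 'memoryview'. tp_new pre-sets
 * obj/_size/_array_interface to None and clears view.obj before __cinit__;
 * tp_traverse and tp_clear also cover the Py_buffer owner (p->view.obj),
 * keeping reference cycles through exported buffers collectible. */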
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.kern.src.stationary_cython.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
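/* Note: type-object boilerplate for '_memoryviewslice', the subclass used
 * to hand C-level slices back to Python. tp_new chains to the memoryview
 * constructor and then swaps in the subclass vtable; tp_dealloc clears
 * from_object and chains to the base dealloc, while tp_clear additionally
 * releases the embedded from_slice via __PYX_XDEC_MEMVIEW. */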
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.kern.src.stationary_cython._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
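/* Note: module scaffolding follows -- an empty method table plus the
 * PyModuleDef. Under CYTHON_PEP489_MULTI_PHASE_INIT the module is created
 * and executed through the Py_mod_create/Py_mod_exec slots (PEP 489);
 * otherwise m_size is -1 and the legacy single-phase init path is used. */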
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_stationary_cython(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_stationary_cython},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"stationary_cython",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
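/* Note: interned-string table. Each entry pairs a cached Python string
 * object with its C literal and length; the trailing fields record the
 * encoding and the unicode/str/intern flags, which is why identifiers
 * (flags ...,1,1) and message text (flags ...,1,0) differ. */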
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_n_s_GPy_kern_src_stationary_cython, __pyx_k_GPy_kern_src_stationary_cython, sizeof(__pyx_k_GPy_kern_src_stationary_cython), 0, 0, 1, 1},
{&__pyx_kp_s_GPy_kern_src_stationary_cython_p, __pyx_k_GPy_kern_src_stationary_cython_p, sizeof(__pyx_k_GPy_kern_src_stationary_cython_p), 0, 0, 1, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_Q, __pyx_k_Q, sizeof(__pyx_k_Q), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1},
{&__pyx_n_s_X2, __pyx_k_X2, sizeof(__pyx_k_X2), 0, 0, 1, 1},
{&__pyx_n_s_X2_2, __pyx_k_X2_2, sizeof(__pyx_k_X2_2), 0, 0, 1, 1},
{&__pyx_n_s_X_2, __pyx_k_X_2, sizeof(__pyx_k_X_2), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dist, __pyx_k_dist, sizeof(__pyx_k_dist), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_grad, __pyx_k_grad, sizeof(__pyx_k_grad), 0, 0, 1, 1},
{&__pyx_n_s_grad_2, __pyx_k_grad_2, sizeof(__pyx_k_grad_2), 0, 0, 1, 1},
{&__pyx_n_s_grad_X, __pyx_k_grad_X, sizeof(__pyx_k_grad_X), 0, 0, 1, 1},
{&__pyx_n_s_grad_X_cython, __pyx_k_grad_X_cython, sizeof(__pyx_k_grad_X_cython), 0, 0, 1, 1},
{&__pyx_n_s_gradq, __pyx_k_gradq, sizeof(__pyx_k_gradq), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_lengthscale_grads, __pyx_k_lengthscale_grads, sizeof(__pyx_k_lengthscale_grads), 0, 0, 1, 1},
{&__pyx_n_s_lengthscale_grads_in_c, __pyx_k_lengthscale_grads_in_c, sizeof(__pyx_k_lengthscale_grads_in_c), 0, 0, 1, 1},
{&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_nd, __pyx_k_nd, sizeof(__pyx_k_nd), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_q, __pyx_k_q, sizeof(__pyx_k_q), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_tmp, __pyx_k_tmp, sizeof(__pyx_k_tmp), 0, 0, 1, 1},
{&__pyx_n_s_tmp_2, __pyx_k_tmp_2, sizeof(__pyx_k_tmp_2), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
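/* Note: __Pyx_InitCachedBuiltins performs one-time lookups of the builtins
 * the module uses (range, ValueError, enumerate, ...); each failure is
 * reported through __PYX_ERR with the originating .pyx/.pxd file index and
 * line, so tracebacks point at the Cython source rather than this file. */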
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 38, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 947, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(2, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(2, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(2, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(2, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(2, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(2, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(2, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":947
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 947, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../.pyenv/versions/gpy391/lib/python3.9/site-packages/numpy/__init__.pxd":953
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 953, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(2, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(2, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(2, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(2, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(2, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(2, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(2, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) __PYX_ERR(2, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(2, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "GPy/kern/src/stationary_cython.pyx":19
* void _lengthscale_grads "_lengthscale_grads" (int N, int M, int Q, double* tmp, double* X, double* X2, double* grad) nogil
*
* def grad_X(int N, int D, int M, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _X,
* np.ndarray[DTYPE_t, ndim=2] _X2,
*/
__pyx_tuple__21 = PyTuple_Pack(11, __pyx_n_s_N, __pyx_n_s_D, __pyx_n_s_M, __pyx_n_s_X, __pyx_n_s_X2, __pyx_n_s_tmp, __pyx_n_s_grad, __pyx_n_s_X_2, __pyx_n_s_X2_2, __pyx_n_s_tmp_2, __pyx_n_s_grad_2); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
__pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(7, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_kern_src_stationary_cython_p, __pyx_n_s_grad_X, 19, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 19, __pyx_L1_error)
/* "GPy/kern/src/stationary_cython.pyx":32
*
* @cython.cdivision(True)
* def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad): # <<<<<<<<<<<<<<
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True):
*/
__pyx_tuple__23 = PyTuple_Pack(11, __pyx_n_s_N, __pyx_n_s_D, __pyx_n_s_M, __pyx_n_s_X_2, __pyx_n_s_X2_2, __pyx_n_s_tmp_2, __pyx_n_s_grad_2, __pyx_n_s_n, __pyx_n_s_d, __pyx_n_s_nd, __pyx_n_s_m); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
__pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(7, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_kern_src_stationary_cython_p, __pyx_n_s_grad_X_cython, 32, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 32, __pyx_L1_error)
/* "GPy/kern/src/stationary_cython.pyx":41
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d])
*
* def lengthscale_grads_in_c(int N, int M, int Q, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _tmp,
* np.ndarray[DTYPE_t, ndim=2] _X,
*/
__pyx_tuple__25 = PyTuple_Pack(11, __pyx_n_s_N, __pyx_n_s_M, __pyx_n_s_Q, __pyx_n_s_tmp, __pyx_n_s_X, __pyx_n_s_X2, __pyx_n_s_grad, __pyx_n_s_tmp_2, __pyx_n_s_X_2, __pyx_n_s_X2_2, __pyx_n_s_grad_2); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 41, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(7, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_kern_src_stationary_cython_p, __pyx_n_s_lengthscale_grads_in_c, 41, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 41, __pyx_L1_error)
/* "GPy/kern/src/stationary_cython.pyx":53
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*
* def lengthscale_grads(int N, int M, int Q, double[:,:] tmp, double[:,:] X, double[:,:] X2, double[:] grad): # <<<<<<<<<<<<<<
* cdef int q, n, m
* cdef double gradq, dist
*/
__pyx_tuple__27 = PyTuple_Pack(12, __pyx_n_s_N, __pyx_n_s_M, __pyx_n_s_Q, __pyx_n_s_tmp_2, __pyx_n_s_X_2, __pyx_n_s_X2_2, __pyx_n_s_grad_2, __pyx_n_s_q, __pyx_n_s_n, __pyx_n_s_m, __pyx_n_s_gradq, __pyx_n_s_dist); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
__pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(7, 0, 12, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_kern_src_stationary_cython_p, __pyx_n_s_lengthscale_grads, 53, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 53, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(2, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(2, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(2, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(2, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__32);
__Pyx_GIVEREF(__pyx_tuple__32);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__33 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(2, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__33);
__Pyx_GIVEREF(__pyx_tuple__33);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__34 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__34);
__Pyx_GIVEREF(__pyx_tuple__34);
__pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 200, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 200, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 223, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 227, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 239, __pyx_L1_error)
__pyx_ptype_5numpy_generic = __Pyx_ImportType(__pyx_t_1, "numpy", "generic", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_generic) __PYX_ERR(1, 771, __pyx_L1_error)
__pyx_ptype_5numpy_number = __Pyx_ImportType(__pyx_t_1, "numpy", "number", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_number) __PYX_ERR(1, 773, __pyx_L1_error)
__pyx_ptype_5numpy_integer = __Pyx_ImportType(__pyx_t_1, "numpy", "integer", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_integer) __PYX_ERR(1, 775, __pyx_L1_error)
__pyx_ptype_5numpy_signedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "signedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_signedinteger) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_ptype_5numpy_unsignedinteger = __Pyx_ImportType(__pyx_t_1, "numpy", "unsignedinteger", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_unsignedinteger) __PYX_ERR(1, 779, __pyx_L1_error)
__pyx_ptype_5numpy_inexact = __Pyx_ImportType(__pyx_t_1, "numpy", "inexact", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_inexact) __PYX_ERR(1, 781, __pyx_L1_error)
__pyx_ptype_5numpy_floating = __Pyx_ImportType(__pyx_t_1, "numpy", "floating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_floating) __PYX_ERR(1, 783, __pyx_L1_error)
__pyx_ptype_5numpy_complexfloating = __Pyx_ImportType(__pyx_t_1, "numpy", "complexfloating", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_complexfloating) __PYX_ERR(1, 785, __pyx_L1_error)
__pyx_ptype_5numpy_flexible = __Pyx_ImportType(__pyx_t_1, "numpy", "flexible", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_flexible) __PYX_ERR(1, 787, __pyx_L1_error)
__pyx_ptype_5numpy_character = __Pyx_ImportType(__pyx_t_1, "numpy", "character", sizeof(PyObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_character) __PYX_ERR(1, 789, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initstationary_cython(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initstationary_cython(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_stationary_cython(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_stationary_cython(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
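/* Note on the check above (descriptive, added): on CPython >= 3.7a1 the
   interpreter is identified via PyInterpreterState_GetID(); older versions
   fall back to comparing the PyInterpreterState pointer itself. Either way,
   the first interpreter that loads the module claims it, and any other
   interpreter gets an ImportError, since the module relies on C-level
   global state that cannot be shared across interpreters. */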
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_stationary_cython(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
static PyThread_type_lock __pyx_t_3[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'stationary_cython' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_stationary_cython(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("stationary_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_GPy__kern__src__stationary_cython) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "GPy.kern.src.stationary_cython")) {
if (unlikely(PyDict_SetItemString(modules, "GPy.kern.src.stationary_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "GPy/kern/src/stationary_cython.pyx":4
* #cython: nonecheck=False
* #cython: wraparound=False
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as np
* from cython.parallel import prange
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "GPy/kern/src/stationary_cython.pyx":9
* cimport cython
*
* np.import_array() # <<<<<<<<<<<<<<
*
* ctypedef np.float64_t DTYPE_t
*/
__pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 9, __pyx_L1_error)
/* "GPy/kern/src/stationary_cython.pyx":19
* void _lengthscale_grads "_lengthscale_grads" (int N, int M, int Q, double* tmp, double* X, double* X2, double* grad) nogil
*
* def grad_X(int N, int D, int M, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _X,
* np.ndarray[DTYPE_t, ndim=2] _X2,
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4kern_3src_17stationary_cython_1grad_X, NULL, __pyx_n_s_GPy_kern_src_stationary_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_grad_X, __pyx_t_1) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "GPy/kern/src/stationary_cython.pyx":32
*
* @cython.cdivision(True)
* def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad): # <<<<<<<<<<<<<<
* cdef int n,d,nd,m
* for nd in prange(N * D, nogil=True):
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4kern_3src_17stationary_cython_3grad_X_cython, NULL, __pyx_n_s_GPy_kern_src_stationary_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_grad_X_cython, __pyx_t_1) < 0) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "GPy/kern/src/stationary_cython.pyx":41
* grad[n,d] += tmp[n, m] * (X[n, d] - X2[m, d])
*
* def lengthscale_grads_in_c(int N, int M, int Q, # <<<<<<<<<<<<<<
* np.ndarray[DTYPE_t, ndim=2] _tmp,
* np.ndarray[DTYPE_t, ndim=2] _X,
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4kern_3src_17stationary_cython_5lengthscale_grads_in_c, NULL, __pyx_n_s_GPy_kern_src_stationary_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 41, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_lengthscale_grads_in_c, __pyx_t_1) < 0) __PYX_ERR(0, 41, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "GPy/kern/src/stationary_cython.pyx":53
* _lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
*
* def lengthscale_grads(int N, int M, int Q, double[:,:] tmp, double[:,:] X, double[:,:] X2, double[:] grad): # <<<<<<<<<<<<<<
* cdef int q, n, m
* cdef double gradq, dist
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4kern_3src_17stationary_cython_7lengthscale_grads, NULL, __pyx_n_s_GPy_kern_src_stationary_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_lengthscale_grads, __pyx_t_1) < 0) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "GPy/kern/src/stationary_cython.pyx":1
* #cython: boundscheck=False # <<<<<<<<<<<<<<
* #cython: nonecheck=False
* #cython: wraparound=False
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_3[0] = PyThread_allocate_lock();
__pyx_t_3[1] = PyThread_allocate_lock();
__pyx_t_3[2] = PyThread_allocate_lock();
__pyx_t_3[3] = PyThread_allocate_lock();
__pyx_t_3[4] = PyThread_allocate_lock();
__pyx_t_3[5] = PyThread_allocate_lock();
__pyx_t_3[6] = PyThread_allocate_lock();
__pyx_t_3[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init GPy.kern.src.stationary_cython", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init GPy.kern.src.stationary_cython");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
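/* Note on the matching above (descriptive, added): each keyword is first
   compared by object identity (**name == key), which usually succeeds
   because argument names are interned; only on a miss does the code fall
   back to a length check plus a full string comparison. If the keyword
   matches a name in argnames[0..num_pos_args), the argument was already
   filled positionally, so control jumps to arg_passed_twice. Unknown
   keywords go into kwds2 (the **kwargs dict) when one exists, and raise
   TypeError otherwise. */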
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
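/* How the endianness probe above works: the union aliases the 32-bit
   value 0x01020304 with its four constituent bytes. A little-endian
   machine stores the least-significant byte first, so u8[0] == 0x04;
   a big-endian machine would store 0x01 there instead. */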
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
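/* Note on the trailing loop above (descriptive, added): while the root
   type is a composite (typegroup 'S'), a new stack frame is pushed and
   parsing descends into the first field, so that format-string scanning
   starts at a scalar leaf rather than at the wrapping struct. */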
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
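/* Example of the parser above: with *ts pointing at "12d", the digits
   are consumed and 12 is returned, leaving *ts at 'd'. If the first
   character is not a digit, -1 is returned and *ts is left untouched. */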
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct needed to
   align it on its first member. This will probably be the same as above,
   but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
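/* Illustrative sketch (not emitted by Cython): both helpers above exploit the
   compiler's own layout rules. sizeof(struct {char c; T x;}) - sizeof(T)
   yields the alignment of T, because the compiler pads after 'c' so that 'x'
   lands on its natural boundary; the __Pyx_pad_* variants measure the tail
   padding the same way. On a typical LP64 ABI (an assumption) both evaluate
   to 8 for double: */
#if 0
#include <stdio.h>
typedef struct { char c; double x; } st_double;  /* x at offset 8, size 16     */
typedef struct { double x; char c; } pad_double; /* 7 tail pad bytes, size 16  */
int main(void) {
    printf("%zu\n", sizeof(st_double) - sizeof(double));   /* 8 */
    printf("%zu\n", sizeof(pad_double) - sizeof(double));  /* 8 */
    return 0;
}
#endif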
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
      case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* advance past whitespace, otherwise the loop never terminates */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
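/* Illustrative examples (not emitted by Cython) of the PEP 3118 array
   syntax handled above, for a field declared as "double x[2][3]": */
#if 0
const char *ok  = "(2,3)d"; /* accepted: every dimension matches arraysize  */
const char *bad = "(2,4)d"; /* ValueError: "Expected a dimension of size 3" */
#endif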
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
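/* Illustrative sketch (not emitted by Cython): in native '@' mode the checker
   above accepts, e.g., "T{i:x:d:y:}" for a dtype laid out like: */
#if 0
typedef struct { int x; double y; } demo_t; /* y at offset 8 on LP64 (an assumption) */
#endif
/* The '<'/'>'/'!' markers are only accepted when they match the host byte
   order and are then handled like '=': standard sizes, no alignment padding. */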
/* BufferGetAndValidate */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (unlikely(info->buf == NULL)) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static int __Pyx__GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
buf->buf = NULL;
if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
__Pyx_ZeroBuffer(buf);
return -1;
}
if (unlikely(buf->ndim != nd)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if (unlikely((size_t)buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_SafeReleaseBuffer(buf);
return -1;
}
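/* Illustrative call pattern (names such as __Pyx_TypeInfo_double are
   assumptions; the real call sites are generated per buffer argument
   elsewhere in this file): */
#if 0
__Pyx_BufFmt_StackElem stack[2];
Py_buffer view;
if (__Pyx__GetBufferAndValidate(&view, obj, &__Pyx_TypeInfo_double,
                                PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, stack) == -1)
    return NULL;                /* dtype/ndim/itemsize mismatch already reported */
/* ... use view.buf / view.strides ... */
__Pyx_SafeReleaseBuffer(&view); /* safe even if the acquisition failed */
#endif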
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
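/* Worked example (illustrative) of the stride fallback above: when the
   exporter supplies no strides the buffer is assumed C-contiguous, and
   strides are synthesized from the innermost dimension outwards. For a
   2 x 3 buffer of doubles (itemsize 8): */
#if 0
Py_ssize_t shape[2] = {2, 3}, strides[2], stride = 8;
int i;
for (i = 1; i >= 0; i--) { strides[i] = stride; stride *= shape[i]; }
/* strides == {24, 8}; stride ends at 48, the total size in bytes */
#endif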
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
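/* Sketch of the acquisition-count protocol (macro names assume the
   __PYX_INC_MEMVIEW/__PYX_XDEC_MEMVIEW wrappers that pass __LINE__): only
   the 0 -> 1 and 1 -> 0 transitions touch the Python refcount of the owning
   memoryview, taking the GIL first if the caller does not hold it: */
#if 0
__Pyx_memviewslice dst = src; /* plain struct copy */
__PYX_INC_MEMVIEW(&dst, 1);   /* have_gil = 1 */
/* ... use dst.data / dst.strides ... */
__PYX_XDEC_MEMVIEW(&dst, 1);  /* last release drops the memoryview reference */
#endif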
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
           a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
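/* Worked example: C division truncates toward zero while Python floors.
   For a = -7, b = 2: q = -3 and r = -1; r is nonzero and (r ^ b) < 0
   because the signs differ, so q is corrected to -4 == -7 // 2 in Python.
   When the signs agree (e.g. 7 / 2) no correction is applied. */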
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
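/* Illustrative semantics of the flags above: with wraparound enabled, index
   -1 on a 3-element list resolves to 2 before the bounds check; with
   boundscheck disabled (Cython's @cython.boundscheck(False)) the macro
   access is unchecked, and out-of-range indices are undefined behaviour. */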
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
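/* Worked example: negative bounds follow Python slice semantics. For
   cstring = "hello" (strlen 5), start = -3, stop = -1: start becomes 2,
   stop becomes 4, and the two bytes "ll" are decoded. An empty or inverted
   range (stop <= start) short-circuits to the empty unicode object. */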
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
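    /* Note on the overflow test above: signed addition can only overflow when
       both operands have the same sign and the result's sign differs from it,
       so the sum is safe whenever (x^a) >= 0 or (x^b) >= 0. Example: for
       a = LONG_MAX, b = 1 the wrapped x is negative, both XORs are negative,
       and the code falls back to PyLong arithmetic. The addition itself is
       performed on unsigned long to avoid undefined behaviour on overflow. */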
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* None */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* TypeImport */
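/* Import a type from another extension module at runtime and compare its
   tp_basicsize with the size the C header was compiled against: too small
   is always an error, a mismatch is an error or a warning depending on
   the requested check_size mode. */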
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* CLineInTraceback */
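/* Decide whether C source line numbers are shown in tracebacks by
   consulting cython_runtime.cline_in_traceback; the pending exception is
   fetched and restored around the lookup so it is not disturbed. */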
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
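/* Sorted cache of synthetic code objects keyed by line number (C lines
   are stored negated to keep them distinct from Python lines): lookup is
   a binary search, insertion keeps the array sorted and grows it in
   chunks of 64 entries. */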
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
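/* Surface C-level frames in Python tracebacks: build (and cache) a dummy
   PyCodeObject for the function/file/line, wrap it in a PyFrameObject,
   set the line number and register it with PyTraceBack_Here(). */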
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
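/* Contiguity test for a slice in 'C' or 'F' order: walk the dimensions in
   the matching direction and require each stride to equal the running
   product of itemsize and the extents seen so far, with no indirect
   (suboffset) dimensions along the way. */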
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
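/* Overlap test for two slices: compute each slice's [start, end) byte
   extent (negative strides extend the start downwards rather than the end
   upwards) and check whether the two intervals intersect. */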
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* CIntFromPyVerify */
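/* Round-trip overflow check used by the CIntFromPy converters: when
   narrowing func_type to target_type, cast down and back up and treat any
   change in value as overflow (negative overflow when the target is
   unsigned and the value was negative). */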
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* TypeInfoCompare */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
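/* Validate a Py_buffer against the per-dimension axis specs of the target
   memoryview type (direct/indirect, contiguous/strided/follow) plus the
   overall C/Fortran contiguity flag, then initialise the memviewslice
   from the validated buffer. */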
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
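/* Coerce a Python object into a 2-D direct+strided 'double' memoryview
   slice; Py_None is passed through unchanged (for optional arguments),
   and failure yields a slice with NULL memview and data pointers. */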
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 1,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CIntToPy */
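/* Convert a C int to a Python integer using the narrowest suitable
   CPython API (PyInt_FromLong / PyLong_FromUnsignedLong[Long] / ...);
   values that fit none of them go through _PyLong_FromByteArray(). */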
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* Declarations */
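/* Complex numbers use one of three representations depending on the
   toolchain: C++ std::complex, C99 _Complex, or a plain struct with
   real/imag members when neither is available. */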
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
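/* Complex division via scaling by the larger-magnitude component of the
   divisor (a Smith-style division), which avoids the spurious overflow
   and underflow that the plain real^2 + imag^2 denominator in the #else
   branch below can produce; the b == 0 case divides through anyway so
   IEEE inf/NaN results propagate. */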
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* MemviewSliceCopyTemplate */
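/* Copy a (possibly strided) slice into a freshly allocated contiguous
   array of the same shape: indirect dimensions are rejected, a shape
   tuple is built, a new array plus wrapping memoryview is allocated, and
   the element copy is delegated to __pyx_memoryview_copy_contents(). */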
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for (i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if (unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntFromPy */
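/* Convert a Python integer to a C int with overflow checking: on CPython
   the first few PyLong digits are read directly as a fast path, then the
   generic PyLong_As*() APIs are tried, with _PyLong_AsByteArray() as the
   last resort for types wider than long long. */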
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
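/* Same conversion template as __Pyx_PyInt_As_int above, instantiated for
   'long'; tautologies such as sizeof(long) < sizeof(long) are template
   artifacts that the compiler folds away as dead branches. */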
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
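/* Counterpart of __Pyx_PyInt_From_int above, instantiated for 'long'. */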
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
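/* Warn (not fail) when the Python version the module was compiled against
   differs from the running interpreter.  Only the major digit and the
   first minor digit fit in the 4-byte buffers, so e.g. 3.1 and 3.10 are
   indistinguishable here -- a known limitation of this check. */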
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
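/* Materialise the module's string-constant table: each entry becomes an
   interned str, a decoded unicode object or a bytes object, and is hashed
   once up front so later lookups cannot fail inside hashing. */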
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
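// Fast path: CPython stores longs as arrays of PyLong_SHIFT-bit digits with
// the sign carried in ob_size; small magnitudes are reassembled inline here
// so PyLong_AsSsize_t is only needed for larger values.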
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
adi-tile-no.c | /**
* adi.c: This file is part of the PolyBench/C 3.2 test suite.
* Alternating Direction Implicit solver with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double; this variant hard-codes a 500x500 problem with 10 time steps (see main). */
#include "adi.h"
/* Array initialization. */
static void init_array(int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int i;
//int j;
{
int c1;
int c3;
int c2;
int c4;
if (n >= 1) {
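/* The ternary expressions in the loop bounds below are floord(n-1, 16) as
   emitted by the polyhedral code generator: they count 16-wide tiles while
   rounding division toward negative infinity for possibly-negative bounds. */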
#pragma omp parallel for private(c4, c2, c3)
for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {
X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;
A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;
B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;
}
}
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int n,double X[500 + 0][500 + 0])
{
int i;
int j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr,"%0.2lf ",X[i][j]);
if ((i * 500 + j) % 20 == 0)
fprintf(stderr,"\n");
}
fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_adi(int tsteps,int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
//int t;
//int i1;
//int i2;
//#pragma scop
{
int c0;
int c2;
int c8;
int c9;
int c15;
if (n >= 1 && tsteps >= 1) {
for (c0 = 0; c0 <= tsteps + -1; c0++) {
if (n >= 2) {
#pragma omp parallel for private(c15, c9, c8)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];
}
}
}
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];
}
}
}
for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];
}
}
}
}
}
#pragma omp parallel for private(c15)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];
}
}
if (n >= 2) {
#pragma omp parallel for private(c15, c9, c8)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];
}
}
}
for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];
}
}
}
for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];
}
}
}
}
}
#pragma omp parallel for private(c15)
for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];
}
}
}
}
}
//#pragma endscop
}
int main(int argc,char **argv)
{
/* Retrieve problem size. */
int n = 500;
int tsteps = 10;
/* Variable declaration/allocation. */
double (*X)[500 + 0][500 + 0];
X = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
double (*A)[500 + 0][500 + 0];
A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
double (*B)[500 + 0][500 + 0];
B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
;
/* Initialize array(s). */
init_array(n, *X, *A, *B);
/* Start timer. */
polybench_timer_start();
;
/* Run kernel. */
kernel_adi(tsteps,n, *X, *A, *B);
/* Stop and print timer. */
polybench_timer_stop();
;
polybench_timer_print();
;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
if (argc > 42 && !strcmp(argv[0],""))
print_array(n, *X);
/* Be clean. */
free(((void *)X));
;
free(((void *)A));
;
free(((void *)B));
;
return 0;
}
|
diffusion_omp.h | //
// diffusion.h
//
//
// Created by Ibrahim Harrane on 11/05/2016.
//
//
#ifndef diffusion_h
#define diffusion_h
/*
*
 * L: Desired parameter length
* N: Number of nodes
* K: Number of iterations
*
*
* A: N x N
* C: N x N
* w_0 L x N
* u L x N x K
* d K x N
* mu N x 1
* H L x N x K
* H_grad L x N x K
* M 1 x 1
* M_grad 1 x 1
* w* L x 1
*/
#include "mex.h"
#include "math.h"
#include <string.h>
#include <omp.h>
#if defined(NAN_EQUALS_ZERO)
#define IsNonZero(d) ((d)!=0.0 || mxIsNaN(d))
#else
#define IsNonZero(d) ((d)!=0.0)
#endif
double dotProduct(double *x, double *y, mwSize n)
{
mwSize i;
double z=0;
for (i=0; i<n; i++)
{
z += (*(x+i)) * (*(y+i));
}
return z;
}
void arrayProduct(double x, double *y, double *z, mwSize n)
{
mwSize i;
/* multiply each element y by x */
for (i=0; i<n; i++) {
*(z+i) = x * (*(y+i));
}
}
void arraySum(double *x, double *y, double *z, mwSize n)
{
mwSize i;
/* add each element y to x */
for (i=0; i<n; i++) {
*(z+i) = *(x+i) + (*(y+i));
}
}
/* ATC diffusion */
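/* Adapt-then-combine (ATC) diffusion LMS: each node i first forms an
   intermediate estimate phi(:,i) from neighbor data weighted by C(j,i)
   (adaptation), then averages its neighbors' intermediate estimates with
   the combination weights A(j,i). */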
void ATC(mwSize N, mwSize L, mwSize K, double *w_star, double *w_0, double *A, double *C, double *mu, double *d, double *u, double *e, double *w_k)
{
mwSize i,j,k,m;
double *phi,*s,*u_j,*phi_j,p,p2,*res,*w_i;
/* Memory allocation */
phi=mxMalloc(L*N*sizeof(double));
s=mxMalloc(L*N*sizeof(double));
u_j=mxMalloc(L*sizeof(double));
w_i=mxMalloc(L*sizeof(double));
res=mxMalloc(L*sizeof(double));
//w_k=w_0
memcpy(w_k,w_0,L*N*sizeof(double));
for(k=0;k<K;k++)
{
/* Error calculation*/
//#pragma omp parallel for private(m)
for(i=0;i<N;i++)
{
// e(k,i)=0
double temp=0;
//*(e+i*K+k)=0;
// e(k,i)=norm(w_star-w_k(:,i),2)^2;
//#pragma omp parallel for reduction(+:temp)
for(m=0;m<L;m++)
{
//#pragma omp critical
//*(e+k+i*K)+=(*(w_star+m)-(*(w_k+L*i+m)))*(*(w_star+m)-(*(w_k+L*i+m)));
temp=temp+(w_star[m]-w_k[L*i+m])*(w_star[m]-w_k[L*i+m]);
}
//#pragma omp critical
e[k+i*K]=temp;
}
/* intermediate estimate Phi calculation */
//s=0
memset (s,0,L*N*sizeof(double));
//memset (p,0,N*sizeof(double));
//#pragma omp parallel for private(j,p,p2,m)
memcpy(phi,w_k,L*N*sizeof(double));
//#pragma omp parallel for private(j,p,p2,m)
for (i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//p=u_j'*w_i
if (IsNonZero(C[j+N*i]))
{
p=0;
//#pragma omp parallel for reduction(+:p)
for(m=0; m<L ;m++)
{
p +=u[m+j*L+k*(N*L)]*w_k[L*i+m];
}
//p=mu(i)*C(j,i)*(d(k,j))-u_j'*w_i)
p2=mu[i]*C[j+N*i]*(d[k+K*j]-p);
//phi(:,i)+=mu(i)*C(j,i)*u_j*(d(k,j))-u_j'*w_i)=s+mu(i)*C(j,i)*u_j*p2
for(m=0;m<L;m++)
{
phi[m+L*i] += u[m+j*L+k*(N*L)]*p2;
}
}
}
}
//w_k=0
memset (w_k,0,L*N*sizeof(double));
//#pragma omp parallel for
for(i=0;i<N;i++)
{
//w(:,i)=w(:,i)+A(j,i)*phi(:,j)
for(j=0;j<N;j++)
{
if (IsNonZero(A[j+N*i]))
{
//#pragma omp parallel for shared(A,phi,w_k) private(m)
for(m=0;m<L;m++)
{
w_k[L*i+m]+= A[j+N*i]*phi[m+L*j];
}
}
}
}
}
mxFree(phi);
mxFree(s);
mxFree(w_i);
mxFree(u_j);
mxFree(res);
}
/* compressed diffusion */
void compressed_diffusion(mwSize N, mwSize L, mwSize K, mwSize M, double *w_star, double *w_0, double *A, double *C, double *mu, double *d, double *u, double *H, double *e, double *w_k)
{
mwSize i,j,k,m,idx;
double *phi,*s,*u_j,*phi_j,p,*res,*w_i;
/* Memory allocation */
phi=mxMalloc(L*N*sizeof(double));
s=mxMalloc(L*sizeof(double));
u_j=mxMalloc(L*sizeof(double));
w_i=mxMalloc(L*sizeof(double));
res=mxMalloc(L*sizeof(double));
//w_k=w_0
for(m=0;m<L;m++)
{
for(j=0;j<N;j++)
{
*(w_k+m+j*L)= *(w_0+m+j*L);
}
}
for(k=0;k<K;k++)
{
/* Error calculation*/
for(i=0;i<N;i++)
{
// e(k,i)=0
*(e+i*K+k)=0;
// e(k,i)=norm(w_star-w_k(:,i),2)^2;
for(m=0;m<L;m++)
{
*(e+k+i*K)+=(*(w_star+m)-(*(w_k+L*i+m)))*(*(w_star+m)-(*(w_k+L*i+m)));
}
}
/* intermediate estimate Phi calculation */
for (i=0;i<N;i++)
{
//s=0
//w_i=w(:,i)
//phi(:,i)=w_i
for(m=0;m<L;m++)
{
*(s+m)=0;
*(w_i+m)=*(w_k+L*i+m);
*(phi+L*i+m)=*(w_i+m);
}
for(j=0;j<N;j++)
{
//w_i(idx(M+1:end))=w(M+1:end,j)
for(m=M; m<L ;m++)
{
idx=*(H+m+L*i+k*(N*L))-1;
*(w_i+idx)=*(w_k+L*j+idx);
}
//u_j=u(:,j,k)
for(m=0; m<L ;m++)
{
*(u_j+m)=*(u+m+j*L+k*(N*L));
}
//p=u_j'*w_i
p=dotProduct(u_j,w_i,L);
//p=mu(i)*C(j,i)*(d(k,j))-u_j'*w_i)
p=(*(mu+i))*(*(C+j+N*i))*(*(d+k+K*j)-p);
//res=mu(i)*C(j,i)*u_j*(d(k,j))-u_j'*w_i)
arrayProduct(p,u_j, res ,L);
//s=s+mu(i)*C(j,i)*u_j*(d(k,j))-u_j'*w_i)
for(m=0;m<L;m++)
{
*(s+m)+= *(res+m);
}
}
//phi(:,i)=w_i+s
for(m=0;m<L;m++)
{
*(phi+m+L*i)+=*(s+m);
}
}
for(i=0;i<N;i++)
{
//w(:,i)=0
for(m=0;m<L;m++)
{
*(w_k+m+i*L)=0;
}
//w(:,i)=w(:,i)+A(j,i)*phi(:,j)
for(j=0;j<N;j++)
{
for(m=0;m<L;m++)
{
*(w_k+L*i+m)+= (*(A+j+N*i))*(*(phi+m+L*j));
}
}
}
}
mxFree(phi);
mxFree(s);
mxFree(w_i);
mxFree(u_j);
mxFree(res);
}
/* doubly compressed Diffusion */
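/* Like compressed_diffusion, but the gradient exchange is also subsampled:
   H selects which weight entries are pulled from neighbors, while H_grad
   selects which gradient entries are combined (the first M and M_grad
   entries, respectively, are kept local). */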
void doubly_compressed_diffusion(mwSize N, mwSize L, mwSize K, mwSize M, mwSize M_grad, double *w_star, double *w_0, double *A, double *C, double *mu, double *d, double *u, double *H, double *H_grad, double *e, double *w_k)
{
mwSize i,j,k,m,idx,idx_grad;
double *grad, *grad_i, *s, *u_j, *phi_j, p, *res, *w_i;
/* Memory allocation */
grad=mxMalloc(L*N*sizeof(double));
grad_i=mxMalloc(L*N*sizeof(double));
s=mxMalloc(L*sizeof(double));
u_j=mxMalloc(L*sizeof(double));
w_i=mxMalloc(L*sizeof(double));
res=mxMalloc(L*sizeof(double));
//w_k=w_0
for(m=0;m<L;m++)
{
for(j=0;j<N;j++)
{
*(w_k+m+j*L)= *(w_0+m+j*L);
}
}
for(k=0;k<K;k++)
{
/* Error calculation*/
for(i=0;i<N;i++)
{
// e(k,i)=0
*(e+i*K+k)=0;
// e(k,i)=norm(w_star-w_k(:,i),2)^2;
for(m=0;m<L;m++)
{
*(e+k+i*K)+=(*(w_star+m)-(*(w_k+L*i+m)))*(*(w_star+m)-(*(w_k+L*i+m)));
}
}
for (i=0;i<N;i++)
{
//s=0
//w_i=w(:,i)
for(m=0;m<L;m++)
{
*(s+m)=0;
*(w_i+m)=*(w_k+L*i+m);
}
for(j=0;j<N;j++)
{
//w_i(idx(M+1:end))=w(M+1:end,j)
for(m=M; m<L ;m++)
{
idx=*(H+m+L*i+k*(N*L))-1;
*(w_i+idx)=*(w_k+L*j+idx);
*(grad+m+j*L)=0;
}
//u_j=u(:,j,k)
for(m=0; m<L ;m++)
{
*(u_j+m)=*(u+m+j*L+k*(N*L));
}
//p=u_j'*w_i
p=dotProduct(u_j,w_i,L);
//p=C(j,i)*(d(k,j))-u_j'*w_i)
p=(*(C+j+N*i))*(*(d+k+K*j)-p);
//res=C(j,i)*u_j*(d(k,j))-u_j'*w_i)
arrayProduct(p, u_j, res, L);
//grad(:,j)=res(:)
for(m=0;m<L;m++)
{
*(grad+m+j*L)= *(res+m);
}
}
//grad_i=grad(:,i)
for(m=0;m<L;m++)
{
*(grad_i+m)=*(grad+m+L*i);
}
for(j=0;j<N;j++)
{
//grad_i(idx(M+1:end))=w(M+1:end,j)
for(m=M_grad; m<L ;m++)
{
idx_grad=*(H_grad+m+L*i+k*(N*L))-1;
*(grad_i+idx_grad)=*(grad+idx_grad+L*j);
}
//s=s+mu(i)*C(j,i)*u_j*grad_p
for(m=0;m<L;m++)
{
*(s+m)+= (*(mu+i))*(*(grad_i+m));
}
}
//w_k(:,i)=w_k+s
for(m=0;m<L;m++)
{
*(w_k+m+L*i)+=*(s+m);
}
}
}
mxFree(grad);
mxFree(grad_i);
mxFree(s);
mxFree(w_i);
mxFree(u_j);
mxFree(res);
}
#endif /* diffusion_h */
|
LAGraph_BF_full1.c | //------------------------------------------------------------------------------
// LAGraph_BF_full1.c: Bellman-Ford single-source shortest paths, returns tree,
// where the diagonal of the input matrix A need not be explicitly zero
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// LAGraph_BF_full1: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full1 performs Bellman-Ford to find the shortest paths, the
// parent nodes along each path, and the hops (number of edges) on each path
// from a given source vertex s in the range [0, n) on a graph given as an
// n-by-n matrix A. The sparse matrix A has an entry A(i, j) = w if there is
// an edge from vertex i to vertex j with weight w.
// TODO: think about the return values
// LAGraph_BF_full1 returns GrB_SUCCESS if it succeeds. In this case, there
// are no negative-weight cycles in the graph, and d, pi, and h are returned.
// The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1,
// where p is the parent node of k-th node in the shortest path. In particular,
// pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest
// path.
// If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and
// the GrB_Vectors d, pi and h (i.e., *pd_output, *ppi_output and
// *ph_output respectively) are returned as NULL.
// Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and
// so on, can be returned, if these errors are found by the underlying
// GrB_* functions.
//------------------------------------------------------------------------------
#define LAGraph_FREE_WORK \
{ \
GrB_free(&d); \
GrB_free(&dmasked); \
GrB_free(&dless); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_LT_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGraph_Free ((void**)&I); \
LAGraph_Free ((void**)&J); \
LAGraph_Free ((void**)&w); \
LAGraph_Free ((void**)&W); \
LAGraph_Free ((void**)&h); \
LAGraph_Free ((void**)&pi); \
}
#define LAGraph_FREE_ALL \
{ \
LAGraph_FREE_WORK \
GrB_free (pd_output); \
GrB_free (ppi_output); \
GrB_free (ph_output); \
}
#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h> // from src/utility
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;
//------------------------------------------------------------------------------
// data type for each entry of the adjacency matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF1_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
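// BF1_lMIN orders tuples lexicographically by (w, h, pi): ties in path
// weight prefer fewer hops, and remaining ties prefer the smaller parent.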
void BF1_lMIN
(
BF1_Tuple3_struct *z,
const BF1_Tuple3_struct *x,
const BF1_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
if (z != x) { *z = *x; }
}
else
{
*z = *y;
}
}
void BF1_PLUSrhs
(
BF1_Tuple3_struct *z,
const BF1_Tuple3_struct *x,
const BF1_Tuple3_struct *y
)
{
z->w = x->w + y->w;
z->h = x->h + y->h;
if (x->pi != UINT64_MAX && y->pi != 0)
{
z->pi = y->pi;
}
else
{
z->pi = x->pi;
}
}
void BF1_Identity
(
BF1_Tuple3_struct *z,
const BF1_Tuple3_struct *x
)
{
*z = *x;
}
void BF1_LT
(
bool *z,
const BF1_Tuple3_struct *x,
const BF1_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
*z = true;
}
else
{
*z = false;
}
}
// Given an n-by-n adjacency matrix A and a source vertex s:
// if there is no negative-weight cycle reachable from s, return the distances
// of the shortest paths from s and the parents along the paths as vector d.
// Otherwise, return d=NULL because a negative-weight cycle exists.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has weights on corresponding entries of edges
// s is given index for source vertex
GrB_Info LAGraph_BF_full1
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dmasked = NULL, dless = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_UnaryOp BF_Identity_Tuple3;
GrB_BinaryOp BF_LT_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF1_Tuple3_struct *W = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
*pd_output = NULL;
*ppi_output = NULL;
*ph_output = NULL;
LAGRAPH_OK (GrB_Matrix_nrows (&nrows, A)) ;
LAGRAPH_OK (GrB_Matrix_ncols (&ncols, A)) ;
LAGRAPH_OK (GrB_Matrix_nvals (&nz, A));
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
if (s >= n) // GrB_Index is unsigned, so only the upper bound can fail
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGRAPH_OK (GrB_Type_new(&BF_Tuple3, sizeof(BF1_Tuple3_struct)));
// GrB_BinaryOp
LAGRAPH_OK (GrB_UnaryOp_new(&BF_Identity_Tuple3,
(void*) (&BF1_Identity), BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_LT_Tuple3,
(LAGraph_binary_function) (&BF1_LT), GrB_BOOL, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF1_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF1_PLUSrhs),
BF_Tuple3, BF_Tuple3, BF_Tuple3));
// GrB_Monoid
BF1_Tuple3_struct BF_identity = (BF1_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
LAGRAPH_OK (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));
//--------------------------------------------------------------------------
// allocate arrays used for tuples
//--------------------------------------------------------------------------
I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_Malloc (nz, sizeof(double)) ;
W = LAGraph_Malloc (nz, sizeof(BF1_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, whose entries become BF_Tuple3 type
//--------------------------------------------------------------------------
LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads;
LAGRAPH_OK(LAGraph_GetNumThreads (&nthreads, NULL)) ;
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
W[k] = (BF1_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
LAGRAPH_OK (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
LAGraph_Free ((void**)&I);
LAGraph_Free ((void**)&J);
LAGraph_Free ((void**)&W);
LAGraph_Free ((void**)&w);
//--------------------------------------------------------------------------
// create and initialize "distance" vector d, dmasked and dless
//--------------------------------------------------------------------------
LAGRAPH_OK (GrB_Vector_new(&d, BF_Tuple3, n));
// make d dense
LAGRAPH_OK(GrB_Vector_assign_UDT(d, NULL, NULL, (void*)&BF_identity,
GrB_ALL, n, NULL));
// initial distance from s to itself
BF1_Tuple3_struct d0 = (BF1_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s));
// create dmasked as a sparse vector with only one entry at s
LAGRAPH_OK (GrB_Vector_new(&dmasked, BF_Tuple3, n));
LAGRAPH_OK(GrB_Vector_setElement_UDT(dmasked, &d0, s));
// create dless
LAGRAPH_OK (GrB_Vector_new(&dless, GrB_BOOL, n));
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
bool any_dless= true; // if there is any newly found shortest path
int64_t iter = 0; // number of iterations
// terminate when no new path is found or more than V-1 loops
while (any_dless && iter < n - 1)
{
// execute semiring on d and A, and save the result to dtmp
LAGRAPH_OK (GrB_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL));
// dless = d .< dtmp
//LAGRAPH_OK (GrB_Vector_clear(dless));
LAGRAPH_OK (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d,
NULL));
// if there is no entry with smaller distance then all shortest paths
// are found
LAGRAPH_OK (GrB_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless,
NULL)) ;
if(any_dless)
{
// update all entries with smaller distances
LAGRAPH_OK (GrB_apply(d, dless, NULL, BF_Identity_Tuple3, dmasked, NULL));
// only use entries that were just updated
LAGRAPH_OK (GrB_Vector_clear(dmasked));
LAGRAPH_OK (GrB_apply(dmasked, dless, NULL, BF_Identity_Tuple3, d, NULL));
//try:
//LAGRAPH_OK (GrB_assign(dmasked, dless, NULL, d, GrB_ALL, n, GrB_DESC_R);
}
iter ++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (any_dless)
{
// execute semiring again to check for negative-weight cycle
LAGRAPH_OK (GrB_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL));
// dless = d .< dtmp
//LAGRAPH_OK (GrB_Vector_clear(dless));
LAGRAPH_OK (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL));
// if there is no entry with smaller distance then all shortest paths
// are found
LAGRAPH_OK (GrB_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless, NULL)) ;
if(any_dless)
{
// printf("A negative-weight cycle found. \n");
LAGraph_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
I = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
W = LAGraph_Malloc (n, sizeof(BF1_Tuple3_struct)) ;
w = LAGraph_Malloc (n, sizeof(double)) ;
h = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
if (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d));
for (GrB_Index k = 0; k < n; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGRAPH_OK (GrB_Vector_new(pd_output, GrB_FP64, n));
LAGRAPH_OK (GrB_Vector_new(ppi_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_new(ph_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_build (*pd_output , I, w , n, GrB_MIN_FP64 ));
LAGRAPH_OK (GrB_Vector_build (*ppi_output, I, pi, n, GrB_MIN_UINT64));
LAGRAPH_OK (GrB_Vector_build (*ph_output , I, h , n, GrB_MIN_UINT64));
LAGraph_FREE_WORK;
return (GrB_SUCCESS) ;
}
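//------------------------------------------------------------------------------
// A minimal calling sketch (illustrative only; it assumes GrB/LAGraph are
// initialized and that a weighted n-by-n GrB_Matrix A has been built):
//------------------------------------------------------------------------------
#if 0
GrB_Vector d = NULL, pi = NULL, h = NULL ;
GrB_Info info = LAGraph_BF_full1 (&d, &pi, &h, A, 0) ;  // source vertex 0
if (info == GrB_SUCCESS)
{
    double dist ;
    GrB_Vector_extractElement (&dist, d, 5) ;   // shortest distance 0 -> 5
}
else if (info == GrB_NO_VALUE)
{
    // a negative-weight cycle was found; d, pi and h are NULL
}
GrB_free (&d) ; GrB_free (&pi) ; GrB_free (&h) ;
#endif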
|
GB_binop__first_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__first_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__first_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__first_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fc64)
// A*D function (colscale): GB (_AxD__first_fc64)
// D*A function (rowscale): GB (_DxB__first_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__first_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__first_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_FC64 || GxB_NO_FIRST_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* Modifications Copyright 2020 by Secure XGBoost Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include "hist_util.h"
namespace xgboost {
namespace common {
/*! \brief column type */
enum ColumnType {
kDenseColumn,
kSparseColumn
};
/*! \brief a column storage, to be used with ApplySplit. Note that each
bin id is stored as index[i] + index_base. */
class Column {
public:
Column(ColumnType type, const uint32_t* index, uint32_t index_base,
const size_t* row_ind, size_t len)
: type_(type),
index_(index),
index_base_(index_base),
row_ind_(row_ind),
len_(len) {}
size_t Size() const { return len_; }
uint32_t GetGlobalBinIdx(size_t idx) const { return index_base_ + index_[idx]; }
uint32_t GetFeatureBinIdx(size_t idx) const { return index_[idx]; }
// column.GetFeatureBinIdx(idx) + column.GetBaseIdx(idx) ==
// column.GetGlobalBinIdx(idx)
uint32_t GetBaseIdx() const { return index_base_; }
ColumnType GetType() const { return type_; }
size_t GetRowIdx(size_t idx) const {
// clang-tidy worries that row_ind_ might be a nullptr, which is possible,
// but low level structure is not safe anyway.
return type_ == ColumnType::kDenseColumn ? idx : row_ind_[idx]; // NOLINT
}
bool IsMissing(size_t idx) const {
return index_[idx] == std::numeric_limits<uint32_t>::max();
}
const size_t* GetRowData() const { return row_ind_; }
private:
ColumnType type_;
const uint32_t* index_;
uint32_t index_base_;
const size_t* row_ind_;
const size_t len_;
};
/*! \brief a collection of columns, with support for construction from
GHistIndexMatrix. */
class ColumnMatrix {
public:
// get number of features
inline bst_uint GetNumFeature() const {
return static_cast<bst_uint>(type_.size());
}
// construct column matrix from GHistIndexMatrix
inline void Init(const GHistIndexMatrix& gmat,
double sparse_threshold) {
const int32_t nfeature = static_cast<int32_t>(gmat.cut.row_ptr.size() - 1);
const size_t nrow = gmat.row_ptr.size() - 1;
// identify type of each column
feature_counts_.resize(nfeature);
type_.resize(nfeature);
std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
uint32_t max_val = std::numeric_limits<uint32_t>::max();
for (bst_uint fid = 0; fid < nfeature; ++fid) {
CHECK_LE(gmat.cut.row_ptr[fid + 1] - gmat.cut.row_ptr[fid], max_val);
}
gmat.GetFeatureCounts(&feature_counts_[0]);
// classify features
for (int32_t fid = 0; fid < nfeature; ++fid) {
if (static_cast<double>(feature_counts_[fid])
< sparse_threshold * nrow) {
type_[fid] = kSparseColumn;
} else {
type_[fid] = kDenseColumn;
}
}
// want to compute storage boundary for each feature
// using variants of prefix sum scan
boundary_.resize(nfeature);
size_t accum_index_ = 0;
size_t accum_row_ind_ = 0;
for (int32_t fid = 0; fid < nfeature; ++fid) {
boundary_[fid].index_begin = accum_index_;
boundary_[fid].row_ind_begin = accum_row_ind_;
if (type_[fid] == kDenseColumn) {
accum_index_ += static_cast<size_t>(nrow);
accum_row_ind_ += static_cast<size_t>(nrow);
} else {
accum_index_ += feature_counts_[fid];
accum_row_ind_ += feature_counts_[fid];
}
boundary_[fid].index_end = accum_index_;
boundary_[fid].row_ind_end = accum_row_ind_;
}
index_.resize(boundary_[nfeature - 1].index_end);
row_ind_.resize(boundary_[nfeature - 1].row_ind_end);
// store least bin id for each feature
index_base_.resize(nfeature);
for (bst_uint fid = 0; fid < nfeature; ++fid) {
index_base_[fid] = gmat.cut.row_ptr[fid];
}
// pre-fill index_ for dense columns
#pragma omp parallel for
for (int32_t fid = 0; fid < nfeature; ++fid) {
if (type_[fid] == kDenseColumn) {
const size_t ibegin = boundary_[fid].index_begin;
uint32_t* begin = &index_[ibegin];
uint32_t* end = begin + nrow;
std::fill(begin, end, std::numeric_limits<uint32_t>::max());
// max() indicates missing values
}
}
// loop over all rows and fill column entries
// num_nonzeros[fid] = how many nonzeros this feature has accumulated so far
std::vector<size_t> num_nonzeros;
num_nonzeros.resize(nfeature);
std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
// For oblivious.
row_wise_index_.resize(nrow * nfeature);
std::fill(row_wise_index_.begin(), row_wise_index_.end(),
std::numeric_limits<uint32_t>::max());
nfeature_ = nfeature;
for (size_t rid = 0; rid < nrow; ++rid) {
const size_t ibegin = gmat.row_ptr[rid];
const size_t iend = gmat.row_ptr[rid + 1];
size_t fid = 0;
for (size_t i = ibegin; i < iend; ++i) {
// NOTE: For the dense data structure, the code below is already oblivious,
// given that |gmat.index| within one instance is already sorted.
const uint32_t bin_id = gmat.index[i];
while (bin_id >= gmat.cut.row_ptr[fid + 1]) {
++fid;
}
// For oblivious. Note this contains index_base.
row_wise_index_[rid * nfeature + fid] = bin_id;
if (type_[fid] == kDenseColumn) {
uint32_t* begin = &index_[boundary_[fid].index_begin];
begin[rid] = bin_id - index_base_[fid];
} else {
uint32_t* begin = &index_[boundary_[fid].index_begin];
begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
row_ind_[boundary_[fid].row_ind_begin + num_nonzeros[fid]] = rid;
++num_nonzeros[fid];
}
}
}
}
/* Fetch an individual column. This code should be used with XGBOOST_TYPE_SWITCH
to determine the type of bin ids */
inline Column GetColumn(unsigned fid) const {
Column c(type_[fid], &index_[boundary_[fid].index_begin], index_base_[fid],
(type_[fid] == ColumnType::kSparseColumn ?
&row_ind_[boundary_[fid].row_ind_begin] : nullptr),
boundary_[fid].index_end - boundary_[fid].index_begin);
return c;
}
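// Illustrative scan of one column (names as defined in this header):
//   Column c = matrix.GetColumn(fid);
//   for (size_t i = 0; i < c.Size(); ++i) {
//     if (!c.IsMissing(i)) { /* use c.GetRowIdx(i), c.GetGlobalBinIdx(i) */ }
//   }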
inline uint32_t OGetRowFeatureBinIndex(size_t row_idx, int fid) const {
// NOTE: `oaccess` between [row_idx * nfeature, (row_idx + 1) * nfeature]
// return row_wise_index_[row_idx * nfeature_ + fid];
return ObliviousArrayAccess(row_wise_index_.data() + row_idx * nfeature_,
fid, nfeature_);
}
private:
struct ColumnBoundary {
// indicate where each column's index and row_ind is stored.
// index_begin and index_end are logical offsets, so they should be converted to
// actual offsets by scaling with packing_factor_
size_t index_begin;
size_t index_end;
size_t row_ind_begin;
size_t row_ind_end;
};
std::vector<size_t> feature_counts_;
std::vector<ColumnType> type_;
SimpleArray<uint32_t> index_; // index_: may store smaller integers; needs padding
SimpleArray<size_t> row_ind_;
std::vector<ColumnBoundary> boundary_;
// For oblivious.
// Row wise feature index, this helps reduce `oaccess` range to number of features.
SimpleArray<uint32_t> row_wise_index_;
int32_t nfeature_;
// index_base_[fid]: least bin id for feature fid
std::vector<uint32_t> index_base_;
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COLUMN_MATRIX_H_
|
timer.h | #ifndef SPLATT_TIMER_H
#define SPLATT_TIMER_H
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include <time.h>
#include <stddef.h>
#include <stdbool.h>
#ifdef __MACH__
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
/******************************************************************************
* STRUCTURES
*****************************************************************************/
/**
* @brief Represents a wall-clock timer.
*/
typedef struct
{
bool running;
double seconds;
double start;
double stop;
} sp_timer_t;
/**
* @brief timer_id provides easy indexing into timers[].
*/
typedef enum
{
TIMER_LVL0, /* LEVEL 0 */
TIMER_ALL,
TIMER_CPD,
TIMER_REORDER,
TIMER_CONVERT,
TIMER_LVL1, /* LEVEL 1 */
TIMER_MTTKRP,
TIMER_INV,
TIMER_FIT,
TIMER_MATMUL,
TIMER_ATA,
TIMER_MATNORM,
TIMER_IO,
TIMER_PART,
TIMER_LVL2, /* LEVEL 2 */
#ifdef SPLATT_USE_MPI
TIMER_MPI,
TIMER_MPI_IDLE,
TIMER_MPI_COMM,
TIMER_MPI_ATA,
TIMER_MPI_REDUCE,
TIMER_MPI_PARTIALS,
TIMER_MPI_NORM,
TIMER_MPI_UPDATE,
TIMER_MPI_FIT,
/* timer max */
TIMER_MTTKRP_MAX,
TIMER_MPI_MAX,
TIMER_MPI_IDLE_MAX,
TIMER_MPI_COMM_MAX,
#endif
TIMER_SPLATT,
TIMER_GIGA,
TIMER_DFACTO,
TIMER_TTBOX,
TIMER_SORT,
TIMER_TILE,
TIMER_MISC,
TIMER_NTIMERS /* LEVEL N */
} timer_id;
/* globals (defined once in a .c file) */
extern int timer_lvl;
extern sp_timer_t timers[TIMER_NTIMERS];
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
#define init_timers splatt_init_timers
/**
* @brief Call timer_reset() on all of timers[].
*/
void init_timers(void);
#define report_times splatt_report_times
/**
* @brief Output a summary of all used timers.
*/
void report_times(void);
#define timer_inc_verbose splatt_timer_inc_verbose
/**
* @brief Increment timer verbosity to the next level;
*/
void timer_inc_verbose(void);
/**
* @brief Return the number of seconds since an unspecified time (e.g., Unix
* epoch). This is accomplished with a high-resolution monotonic timer,
* suitable for performance timing.
*
* @return The number of seconds.
*/
static inline double monotonic_seconds()
{
#ifdef __MACH__
/* OSX */
static mach_timebase_info_data_t info;
static double seconds_per_unit;
if(seconds_per_unit == 0) {
#pragma omp critical
{
mach_timebase_info(&info);
seconds_per_unit = ((double) info.numer / (double) info.denom) / 1e9; /* avoid integer truncation */
}
}
return seconds_per_unit * mach_absolute_time();
#else
/* Linux systems */
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec + ts.tv_nsec * 1e-9;
#endif
}
/**
* @brief Reset all fields of a sp_timer_t.
*
* @param timer The timer to reset.
*/
static inline void timer_reset(sp_timer_t * const timer)
{
timer->running = false;
timer->seconds = 0;
timer->start = 0;
timer->stop = 0;
}
/**
* @brief Start a sp_timer_t. NOTE: this does not reset the timer.
*
* @param timer The timer to start.
*/
static inline void timer_start(sp_timer_t * const timer)
{
if(!timer->running) {
timer->running = true;
timer->start = monotonic_seconds();
}
}
/**
* @brief Stop a sp_timer_t and update its time.
*
* @param timer The timer to stop.
*/
static inline void timer_stop(sp_timer_t * const timer)
{
timer->running = false;
timer->stop = monotonic_seconds();
timer->seconds += timer->stop - timer->start;
}
/**
* @brief Give a sp_timer_t a fresh start by resetting and starting it.
*
* @param timer The timer to refresh.
*/
static inline void timer_fstart(sp_timer_t * const timer)
{
timer_reset(timer);
timer_start(timer);
}
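/**
 * @brief Usage sketch (illustrative only; assumes init_timers() has run):
 *
 *   timer_fstart(&timers[TIMER_MTTKRP]);
 *   // ... work to be timed ...
 *   timer_stop(&timers[TIMER_MTTKRP]);
 *   printf("mttkrp: %0.3fs\n", timers[TIMER_MTTKRP].seconds);
 */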
#endif
|
conn_comp.c | #include "q_incs.h"
#include "_mmap.h"
#define NODE_TYPE uint32_t
#define MAXLINE 65535
int
main(
int argc,
char **argv
)
{
int status = 0;
FILE *lbfp = NULL;
FILE *ubfp = NULL;
FILE *tofp = NULL;
NODE_TYPE *lbl = NULL;
NODE_TYPE *lb = NULL;
NODE_TYPE *ub = NULL;
NODE_TYPE *to = NULL;
char *lb_X = NULL; size_t lb_nX = 0;
char *ub_X = NULL; size_t ub_nX = 0;
char *to_X = NULL; size_t to_nX = 0;
if ( argc != 1 ) { go_BYE(-1); }
status = rs_mmap("lb.bin", &lb_X, &lb_nX, 0); cBYE(status);
lb = (NODE_TYPE *)lb_X;
status = rs_mmap("ub.bin", &ub_X, &ub_nX, 0); cBYE(status);
ub = (NODE_TYPE *)ub_X;
status = rs_mmap("to.bin", &to_X, &to_nX, 0); cBYE(status);
to = (NODE_TYPE *)to_X;
uint64_t n_nodes = lb_nX / sizeof(NODE_TYPE);
fprintf(stderr, "Working on %ld nodes \n", n_nodes);
lbl = malloc(n_nodes * sizeof(NODE_TYPE));
return_if_malloc_failed(lbl);
for ( uint64_t i = 0; i < n_nodes; i++ ) {
lbl[i] = i;
}
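/* Label propagation: every node starts with its own id as its label; each
   pass lowers a node's label to the minimum over itself and its
   out-neighbors. When a full pass changes nothing, all nodes in a connected
   component share that component's smallest node id. */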
bool is_any_change = true; // just to get into the first pass
for ( int iter = 0; is_any_change == true; iter++ ) {
is_any_change = false;
// OR-reduce the change flag to avoid a data race on the shared bool
#pragma omp parallel for schedule(static) reduction(||:is_any_change)
for ( uint64_t i = 0; i < n_nodes; i++ ) {
bool l_is_any_change = false;
if ( ub[i] <= lb[i] ) { continue; }
NODE_TYPE minval = lbl[i];
for ( uint64_t j = lb[i]; j < ub[i]; j++ ) {
minval = mcr_min(minval, lbl[to[j]]);
}
if ( lbl[i] != minval ) {
l_is_any_change = true;
lbl[i] = minval;
}
if ( ( l_is_any_change ) && ( is_any_change == false ) ) {
is_any_change = true;
}
}
fprintf(stderr, "Pass %d \n", iter);
}
BYE:
if ( lb_X != NULL ) { munmap(lb_X, lb_nX); }
if ( ub_X != NULL ) { munmap(ub_X, ub_nX); }
if ( to_X != NULL ) { munmap(to_X, to_nX); }
fclose_if_non_null(lbfp);
fclose_if_non_null(ubfp);
fclose_if_non_null(tofp);
if ( lbl != NULL ) { free(lbl); }
return status;
}
|