source
stringlengths
3
92
c
stringlengths
26
2.25M
lock-nested.c
/*
 * lock-nested.c -- Archer testcase
 */
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
  int var = 0;

  // A nestable lock may be re-acquired by the thread that already holds it;
  // each set must be balanced by an unset before another thread can enter.
  omp_nest_lock_t lock;
  omp_init_nest_lock(&lock);

#pragma omp parallel num_threads(2) shared(var)
  {
    // Acquire twice (nested), mutate the shared counter, release twice.
    // The increment is therefore fully serialized: no data race expected.
    omp_set_nest_lock(&lock);
    omp_set_nest_lock(&lock);
    var++;
    omp_unset_nest_lock(&lock);
    omp_unset_nest_lock(&lock);
  }

  omp_destroy_nest_lock(&lock);
  fprintf(stderr, "DONE\n");

  // Both threads incremented exactly once; anything else is a failure.
  int error = (var != 2);
  return error;
}

// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
MzMLHandler.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2015. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Andreas Bertsch $ // $Authors: Marc Sturm $ // -------------------------------------------------------------------------- #ifndef OPENMS_FORMAT_HANDLERS_MZMLHANDLER_H #define OPENMS_FORMAT_HANDLERS_MZMLHANDLER_H #include <OpenMS/CONCEPT/Exception.h> #include <OpenMS/CONCEPT/ProgressLogger.h> #include <OpenMS/CONCEPT/VersionInfo.h> #include <OpenMS/DATASTRUCTURES/CVMappings.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/CONCEPT/LogStream.h> #include <OpenMS/FORMAT/HANDLERS/XMLHandler.h> #include <OpenMS/FORMAT/HANDLERS/MzMLHandlerHelper.h> #include <OpenMS/FORMAT/VALIDATORS/MzMLValidator.h> #include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h> #include <OpenMS/FORMAT/Base64.h> #include <OpenMS/FORMAT/MSNumpressCoder.h> #include <OpenMS/FORMAT/VALIDATORS/SemanticValidator.h> #include <OpenMS/FORMAT/CVMappingFile.h> #include <OpenMS/FORMAT/ControlledVocabulary.h> #include <OpenMS/INTERFACES/IMSDataConsumer.h> #include <OpenMS/SYSTEM/File.h> #include <sstream> #include <iostream> #include <QRegExp> //MISSING: // - more than one selected ion per precursor (warning if more than one) // - scanWindowList for each acquisition separately (currently for the whole spectrum only) // - instrumentConfigurationRef attribute for scan (why should the instrument change between scans? 
- warning if used) // - scanSettingsRef attribute for instrumentConfiguration tag (currently no information there because of missing mapping file entry - warning if used) // xs:id/xs:idref prefix list // - sf_ru : sourceFile (run) // - sf_sp : sourceFile (spectrum) // - sf_pr : sourceFile (precursor) // - sf_ac : sourceFile (acquisition) // - sa : sample // - ic : instrumentConfiguration // - so_dp : software (data processing) // - so_in : software (instrument) // - dp_sp : dataProcessing (spectrum) // - dp_bi : dataProcessing (binary data array) // - dp_ch : dataProcessing (chromatogram) namespace OpenMS { class ControlledVocabulary; namespace Internal { /** @brief XML handler for MzMLFile MapType has to be an MSExperiment or have the same interface. In read-mode, this class will parse an MzML XML file and append the input spectra to the provided MapType object or (if provided separately through setMSDataConsumer) to the provided IMSDataConsumer Interface. @note Do not use this class. It is only needed in MzMLFile. @note Only upon destruction of this class it can be guaranteed that all data has been appended to the appropriate consumer of the data. Do not try to access the data before that. @todo replace hardcoded cv stuff with more flexible handling via obo r/w. 
*/ template <typename MapType> class MzMLHandler : public XMLHandler { public: /**@name Constructors and destructor */ //@{ /// Constructor for a read-only handler MzMLHandler(MapType& exp, const String& filename, const String& version, ProgressLogger& logger) : XMLHandler(filename, version), exp_(&exp), cexp_(0), options_(), spec_(), chromatogram_(), data_(), default_array_length_(0), in_spectrum_list_(false), decoder_(), logger_(logger), consumer_(NULL), scan_count(0), chromatogram_count(0), skip_chromatogram_(false), skip_spectrum_(false), rt_set_(false) /* , validator_(mapping_, cv_) */ { cv_.loadFromOBO("MS", File::find("/CV/psi-ms.obo")); cv_.loadFromOBO("PATO", File::find("/CV/quality.obo")); cv_.loadFromOBO("UO", File::find("/CV/unit.obo")); cv_.loadFromOBO("BTO", File::find("/CV/brenda.obo")); cv_.loadFromOBO("GO", File::find("/CV/goslim_goa.obo")); CVMappingFile().load(File::find("/MAPPING/ms-mapping.xml"), mapping_); //~ validator_ = Internal::MzMLValidator(mapping_, cv_); // check the version number of the mzML handler if (VersionInfo::VersionDetails::create(version_) == VersionInfo::VersionDetails::EMPTY) { LOG_ERROR << "MzMLHandler was initialized with an invalid version number: " << version_ << std::endl; } } /// Constructor for a write-only handler MzMLHandler(const MapType& exp, const String& filename, const String& version, const ProgressLogger& logger) : XMLHandler(filename, version), exp_(0), cexp_(&exp), options_(), spec_(), chromatogram_(), data_(), default_array_length_(0), in_spectrum_list_(false), decoder_(), logger_(logger), consumer_(NULL), scan_count(0), chromatogram_count(0), skip_chromatogram_(false), skip_spectrum_(false), rt_set_(false) /* , validator_(mapping_, cv_) */ { cv_.loadFromOBO("MS", File::find("/CV/psi-ms.obo")); cv_.loadFromOBO("PATO", File::find("/CV/quality.obo")); cv_.loadFromOBO("UO", File::find("/CV/unit.obo")); cv_.loadFromOBO("BTO", File::find("/CV/brenda.obo")); cv_.loadFromOBO("GO", 
File::find("/CV/goslim_goa.obo")); CVMappingFile().load(File::find("/MAPPING/ms-mapping.xml"), mapping_); //~ validator_ = Internal::MzMLValidator(mapping_, cv_); // check the version number of the mzML handler if (VersionInfo::VersionDetails::create(version_) == VersionInfo::VersionDetails::EMPTY) { LOG_ERROR << "MzMLHandler was initialized with an invalid version number: " << version_ << std::endl; } } /// Destructor virtual ~MzMLHandler() {} //@} /**@name XML Handling functions and output writing */ //@{ // Docu in base class virtual void endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname); // Docu in base class virtual void startElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname, const xercesc::Attributes& attributes); // Docu in base class virtual void characters(const XMLCh* const chars, const XMLSize_t length); //Docu in base class virtual void writeTo(std::ostream& os); //@} /**@name PeakFileOptions setters/getters The PeakFileOptions object determine the reading and writing of the MzML file. In read-mode the lazy-loading options determine whether meta-data only or the full raw data is read into memory and how this data should be handled. The MS-level, m/z, RT and Intensity range options determine which part of the MzML file is read into memory. 
*/ //@{ /// Set the peak file options void setOptions(const PeakFileOptions& opt) { options_ = opt; } /// Get the peak file options PeakFileOptions& getOptions() { return options_; } //@} /// Get the spectra and chromatogram counts of a file void getCounts(Size& spectra_counts, Size& chromatogram_counts) { spectra_counts = scan_count; chromatogram_counts = chromatogram_count; } /// Set the IMSDataConsumer consumer which will consume the read data void setMSDataConsumer(Interfaces::IMSDataConsumer<MapType>* consumer) { consumer_ = consumer; } protected: /// Peak type typedef typename MapType::PeakType PeakType; /// Chromatogram peak type typedef typename MapType::ChromatogramPeakType ChromatogramPeakType; /// Spectrum type typedef MSSpectrum<PeakType> SpectrumType; /// Spectrum type typedef MSChromatogram<ChromatogramPeakType> ChromatogramType; typedef MzMLHandlerHelper::BinaryData BinaryData; void writeSpectrum_(std::ostream& os, const SpectrumType& spec, Size s, Internal::MzMLValidator& validator, bool renew_native_ids, std::vector<std::vector<DataProcessing> >& dps); void writeChromatogram_(std::ostream& os, const ChromatogramType& chromatogram, Size c, Internal::MzMLValidator& validator); template <typename ContainerT> void writeContainerData(std::ostream& os, const PeakFileOptions& pf_options_, const ContainerT& container, String array_type) { bool is32Bit = ((array_type == "intensity" && pf_options_.getIntensity32Bit()) || pf_options_.getMz32Bit()); if (!is32Bit || pf_options_.getNumpressConfigurationMassTime().np_compression != MSNumpressCoder::NONE) { std::vector<double> data_to_encode(container.size()); if (array_type == "intensity") { for (Size p = 0; p < container.size(); ++p) { data_to_encode[p] = container[p].getIntensity(); } } else { for (Size p = 0; p < container.size(); ++p) { data_to_encode[p] = container[p].getMZ(); } } writeBinaryDataArray(os, pf_options_, data_to_encode, false, array_type); } else { std::vector<float> 
data_to_encode(container.size()); if (array_type == "intensity") { for (Size p = 0; p < container.size(); ++p) { data_to_encode[p] = container[p].getIntensity(); } } else { for (Size p = 0; p < container.size(); ++p) { data_to_encode[p] = container[p].getMZ(); } } writeBinaryDataArray(os, pf_options_, data_to_encode, true, array_type); } } /** @brief Populate all spectra on the stack with data from input Will populate all spectra on the current work stack with data (using multiple threads if available) and append them to the result. */ void populateSpectraWithData() { // Whether spectrum should be populated with data if (options_.getFillData()) { size_t errCount = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (SignedSize i = 0; i < (SignedSize)spectrum_data_.size(); i++) { // parallel exception catching and re-throwing business if (!errCount) // no need to parse further if already an error was encountered { try { populateSpectraWithData_(spectrum_data_[i].data, spectrum_data_[i].default_array_length, options_, spectrum_data_[i].spectrum); if (options_.getSortSpectraByMZ() && !spectrum_data_[i].spectrum.isSorted()) { spectrum_data_[i].spectrum.sortByPosition(); } } catch (...) { #pragma omp critical(HandleException) ++errCount; } } } if (errCount != 0) { throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data."); } } // Append all spectra to experiment / consumer for (Size i = 0; i < spectrum_data_.size(); i++) { if (consumer_ != NULL) { consumer_->consumeSpectrum(spectrum_data_[i].spectrum); if (options_.getAlwaysAppendData()) { exp_->addSpectrum(spectrum_data_[i].spectrum); } } else { exp_->addSpectrum(spectrum_data_[i].spectrum); } } // Delete batch spectrum_data_.clear(); } /** @brief Populate all chromatograms on the stack with data from input Will populate all chromatograms on the current work stack with data (using multiple threads if available) and append them to the result. 
*/ void populateChromatogramsWithData() { // Whether chromatogram should be populated with data if (options_.getFillData()) { size_t errCount = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (SignedSize i = 0; i < (SignedSize)chromatogram_data_.size(); i++) { // parallel exception catching and re-throwing business try { populateChromatogramsWithData_(chromatogram_data_[i].data, chromatogram_data_[i].default_array_length, options_, chromatogram_data_[i].chromatogram); if (options_.getSortChromatogramsByRT() && !chromatogram_data_[i].chromatogram.isSorted()) { chromatogram_data_[i].chromatogram.sortByPosition(); } } catch (...) {++errCount; } } if (errCount != 0) { throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data."); } } // Append all chromatograms to experiment / consumer for (Size i = 0; i < chromatogram_data_.size(); i++) { if (consumer_ != NULL) { consumer_->consumeChromatogram(chromatogram_data_[i].chromatogram); if (options_.getAlwaysAppendData()) { exp_->addChromatogram(chromatogram_data_[i].chromatogram); } } else { exp_->addChromatogram(chromatogram_data_[i].chromatogram); } } // Delete batch chromatogram_data_.clear(); } /** @brief Fill a single spectrum with data from input @note Do not modify any internal state variables of the class since this function will be executed in parallel. 
*/
      // NOTE(review): invoked from inside an OpenMP parallel loop (see
      // populateSpectraWithData); must not touch mutable handler state.
      // input_data: decoded in place (base64 -> numeric arrays).
      // default_arr_length: in/out -- repaired here if it disagrees with the
      //                     actual decoded array sizes.
      // spectrum: receives peaks plus any float/int/string meta data arrays.
      template <typename SpectrumType>
      void populateSpectraWithData_(std::vector<MzMLHandlerHelper::BinaryData>& input_data,
                                    Size& default_arr_length,
                                    const PeakFileOptions& peak_file_options,
                                    SpectrumType& spectrum)
      {
        typedef typename SpectrumType::PeakType PeakType;

        //decode all base64 arrays
        MzMLHandlerHelper::decodeBase64Arrays(input_data);

        //look up the precision and the index of the intensity and m/z array
        bool mz_precision_64 = true;
        bool int_precision_64 = true;
        SignedSize mz_index = -1;
        SignedSize int_index = -1;
        MzMLHandlerHelper::computeDataProperties_(input_data, mz_precision_64, mz_index, "m/z array");
        MzMLHandlerHelper::computeDataProperties_(input_data, int_precision_64, int_index, "intensity array");

        //Abort if no m/z or intensity array is present
        if (int_index == -1 || mz_index == -1)
        {
          //if defaultArrayLength > 0 : warn that no m/z or int arrays is present
          if (default_arr_length != 0)
          {
            warning(LOAD, String("The m/z or intensity array of spectrum '") + spectrum.getNativeID() +
                    "' is missing and default_arr_length is " + default_arr_length + ".");
          }
          return;
        }

        // Error if intensity or m/z is encoded as int32|64 - they should be float32|64!
        if ((input_data[mz_index].ints_32.size() > 0) || (input_data[mz_index].ints_64.size() > 0))
        {
          fatalError(LOAD, "Encoding m/z array as integer is not allowed!");
        }
        if ((input_data[int_index].ints_32.size() > 0) || (input_data[int_index].ints_64.size() > 0))
        {
          fatalError(LOAD, "Encoding intensity array as integer is not allowed!");
        }

        // Warn if the decoded data has a different size than the defaultArrayLength
        Size mz_size = mz_precision_64 ? input_data[mz_index].floats_64.size() : input_data[mz_index].floats_32.size();
        Size int_size = int_precision_64 ? input_data[int_index].floats_64.size() : input_data[int_index].floats_32.size();

        // Check if int-size and mz-size are equal
        if (mz_size != int_size)
        {
          fatalError(LOAD, String("The length of m/z and integer values of spectrum '") + spectrum.getNativeID() +
                     "' differ (mz-size: " + mz_size + ", int-size: " + int_size + "! Not reading spectrum!");
        }

        bool repair_array_length = false;
        if (default_arr_length != mz_size)
        {
          warning(LOAD, String("The m/z array of spectrum '") + spectrum.getNativeID() + "' has the size " + mz_size +
                  ", but it should have size " + default_arr_length + " (defaultArrayLength).");
          repair_array_length = true;
        }
        if (default_arr_length != int_size)
        {
          warning(LOAD, String("The intensity array of spectrum '") + spectrum.getNativeID() + "' has the size " + int_size +
                  ", but it should have size " + default_arr_length + " (defaultArrayLength).");
          repair_array_length = true;
        }
        if (repair_array_length)
        {
          // Trust the actually decoded size over the XML attribute: the peak
          // loop below iterates default_arr_length elements, so they must agree.
          default_arr_length = int_size;
          warning(LOAD, String("Fixing faulty defaultArrayLength to ") + default_arr_length + ".");
        }

        //create meta data arrays and reserve enough space for the content
        // (more than 2 arrays means there are additional, non-m/z/intensity arrays)
        if (input_data.size() > 2)
        {
          for (Size i = 0; i < input_data.size(); i++)
          {
            if (input_data[i].meta.getName() != "m/z array" && input_data[i].meta.getName() != "intensity array")
            {
              if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_FLOAT)
              {
                //create new array
                spectrum.getFloatDataArrays().resize(spectrum.getFloatDataArrays().size() + 1);
                //reserve space in the array
                spectrum.getFloatDataArrays().back().reserve(input_data[i].size);
                //copy meta info into MetaInfoDescription
                spectrum.getFloatDataArrays().back().MetaInfoDescription::operator=(input_data[i].meta);
              }
              else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_INT)
              {
                //create new array
                spectrum.getIntegerDataArrays().resize(spectrum.getIntegerDataArrays().size() + 1);
                //reserve space in the array
                spectrum.getIntegerDataArrays().back().reserve(input_data[i].size);
                //copy meta info into MetaInfoDescription
                spectrum.getIntegerDataArrays().back().MetaInfoDescription::operator=(input_data[i].meta);
              }
              else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_STRING)
              {
                //create new array
                spectrum.getStringDataArrays().resize(spectrum.getStringDataArrays().size() + 1);
                //reserve space in the array
                spectrum.getStringDataArrays().back().reserve(input_data[i].decoded_char.size());
                //copy meta info into MetaInfoDescription
                spectrum.getStringDataArrays().back().MetaInfoDescription::operator=(input_data[i].meta);
              }
            }
          }
        }

        // Copy meta data from m/z and intensity binary
        // We don't have this as a separate location => store it in spectrum
        for (Size i = 0; i < input_data.size(); i++)
        {
          if (input_data[i].meta.getName() == "m/z array" || input_data[i].meta.getName() == "intensity array")
          {
            std::vector<UInt> keys;
            input_data[i].meta.getKeys(keys);
            for (Size k = 0; k < keys.size(); ++k)
            {
              spectrum.setMetaValue(keys[k], input_data[i].meta.getMetaValue(keys[k]));
            }
          }
        }

        //add the peaks and the meta data to the container (if they pass the restrictions)
        PeakType tmp;
        spectrum.reserve(default_arr_length);
        for (Size n = 0; n < default_arr_length; n++)
        {
          double mz = mz_precision_64 ? input_data[mz_index].floats_64[n] : input_data[mz_index].floats_32[n];
          double intensity = int_precision_64 ? input_data[int_index].floats_64[n] : input_data[int_index].floats_32[n];
          // Only keep peaks inside the optional m/z and intensity windows.
          if ((!peak_file_options.hasMZRange() || peak_file_options.getMZRange().encloses(DPosition<1>(mz)))
             && (!peak_file_options.hasIntensityRange() || peak_file_options.getIntensityRange().encloses(DPosition<1>(intensity))))
          {
            //add peak
            tmp.setIntensity(intensity);
            tmp.setMZ(mz);
            spectrum.push_back(tmp);

            //add meta data
            // The per-type indices track which target array corresponds to
            // input_data[i]; they are advanced even when n is out of range for
            // that array, so array <-> index correspondence stays intact.
            UInt meta_float_array_index = 0;
            UInt meta_int_array_index = 0;
            UInt meta_string_array_index = 0;
            for (Size i = 0; i < input_data.size(); i++) //loop over all binary data arrays
            {
              if (input_data[i].meta.getName() != "m/z array" && input_data[i].meta.getName() != "intensity array") // is meta data array?
              {
                if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_FLOAT)
                {
                  if (n < input_data[i].size)
                  {
                    double value = (input_data[i].precision == MzMLHandlerHelper::BinaryData::PRE_64) ? input_data[i].floats_64[n] : input_data[i].floats_32[n];
                    spectrum.getFloatDataArrays()[meta_float_array_index].push_back(value);
                  }
                  ++meta_float_array_index;
                }
                else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_INT)
                {
                  if (n < input_data[i].size)
                  {
                    Int64 value = (input_data[i].precision == MzMLHandlerHelper::BinaryData::PRE_64) ? input_data[i].ints_64[n] : input_data[i].ints_32[n];
                    spectrum.getIntegerDataArrays()[meta_int_array_index].push_back(value);
                  }
                  ++meta_int_array_index;
                }
                else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_STRING)
                {
                  if (n < input_data[i].decoded_char.size())
                  {
                    String value = input_data[i].decoded_char[n];
                    spectrum.getStringDataArrays()[meta_string_array_index].push_back(value);
                  }
                  ++meta_string_array_index;
                }
              }
            }
          }
        }
      }

      /** @brief Fill a single chromatogram with data from input

          @note Do not modify any internal state variables of the class since
          this function will be executed in parallel.

*/ template <typename ChromatogramType> void populateChromatogramsWithData_(std::vector<MzMLHandlerHelper::BinaryData>& input_data, Size& default_arr_length, const PeakFileOptions& peak_file_options, ChromatogramType& inp_chromatogram) { typedef typename ChromatogramType::PeakType ChromatogramPeakType; //decode all base64 arrays MzMLHandlerHelper::decodeBase64Arrays(input_data); //look up the precision and the index of the intensity and m/z array bool int_precision_64 = true; bool rt_precision_64 = true; SignedSize int_index = -1; SignedSize rt_index = -1; MzMLHandlerHelper::computeDataProperties_(input_data, rt_precision_64, rt_index, "time array"); MzMLHandlerHelper::computeDataProperties_(input_data, int_precision_64, int_index, "intensity array"); //Abort if no m/z or intensity array is present if (int_index == -1 || rt_index == -1) { //if defaultArrayLength > 0 : warn that no m/z or int arrays is present if (default_arr_length != 0) { warning(LOAD, String("The m/z or intensity array of chromatogram '") + inp_chromatogram.getNativeID() + "' is missing and default_arr_length is " + default_arr_length + "."); } return; } //Warn if the decoded data has a different size than the defaultArrayLength Size rt_size = rt_precision_64 ? input_data[rt_index].floats_64.size() : input_data[rt_index].floats_32.size(); if (default_arr_length != rt_size) { warning(LOAD, String("The base64-decoded rt array of chromatogram '") + inp_chromatogram.getNativeID() + "' has the size " + rt_size + ", but it should have size " + default_arr_length + " (defaultArrayLength)."); } Size int_size = int_precision_64 ? 
input_data[int_index].floats_64.size() : input_data[int_index].floats_32.size(); if (default_arr_length != int_size) { warning(LOAD, String("The base64-decoded intensity array of chromatogram '") + inp_chromatogram.getNativeID() + "' has the size " + int_size + ", but it should have size " + default_arr_length + " (defaultArrayLength)."); } //create meta data arrays and reserve enough space for the content if (input_data.size() > 2) { for (Size i = 0; i < input_data.size(); i++) { if (input_data[i].meta.getName() != "intensity array" && input_data[i].meta.getName() != "time array") { if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_FLOAT) { //create new array inp_chromatogram.getFloatDataArrays().resize(inp_chromatogram.getFloatDataArrays().size() + 1); //reserve space in the array inp_chromatogram.getFloatDataArrays().back().reserve(input_data[i].size); //copy meta info into MetaInfoDescription inp_chromatogram.getFloatDataArrays().back().MetaInfoDescription::operator=(input_data[i].meta); } else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_INT) { //create new array inp_chromatogram.getIntegerDataArrays().resize(inp_chromatogram.getIntegerDataArrays().size() + 1); //reserve space in the array inp_chromatogram.getIntegerDataArrays().back().reserve(input_data[i].size); //copy meta info into MetaInfoDescription inp_chromatogram.getIntegerDataArrays().back().MetaInfoDescription::operator=(input_data[i].meta); } else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_STRING) { //create new array inp_chromatogram.getStringDataArrays().resize(inp_chromatogram.getStringDataArrays().size() + 1); //reserve space in the array inp_chromatogram.getStringDataArrays().back().reserve(input_data[i].decoded_char.size()); //copy meta info into MetaInfoDescription inp_chromatogram.getStringDataArrays().back().MetaInfoDescription::operator=(input_data[i].meta); } } } } //copy meta data from time and intensity binary //We don't have 
this as a separate location => store it in spectrum for (Size i = 0; i < input_data.size(); i++) { if (input_data[i].meta.getName() == "time array" || input_data[i].meta.getName() == "intensity array") { std::vector<UInt> keys; input_data[i].meta.getKeys(keys); for (Size k = 0; k < keys.size(); ++k) { inp_chromatogram.setMetaValue(keys[k], input_data[i].meta.getMetaValue(keys[k])); } } } //add the peaks and the meta data to the container (if they pass the restrictions) inp_chromatogram.reserve(default_arr_length); ChromatogramPeakType tmp; for (Size n = 0; n < default_arr_length; n++) { double rt = rt_precision_64 ? input_data[rt_index].floats_64[n] : input_data[rt_index].floats_32[n]; double intensity = int_precision_64 ? input_data[int_index].floats_64[n] : input_data[int_index].floats_32[n]; if ((!peak_file_options.hasRTRange() || peak_file_options.getRTRange().encloses(DPosition<1>(rt))) && (!peak_file_options.hasIntensityRange() || peak_file_options.getIntensityRange().encloses(DPosition<1>(intensity)))) { //add peak tmp.setIntensity(intensity); tmp.setRT(rt); inp_chromatogram.push_back(tmp); //add meta data UInt meta_float_array_index = 0; UInt meta_int_array_index = 0; UInt meta_string_array_index = 0; for (Size i = 0; i < input_data.size(); i++) //loop over all binary data arrays { if (input_data[i].meta.getName() != "intensity array" && input_data[i].meta.getName() != "time array") // is meta data array? { if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_FLOAT) { if (n < input_data[i].size) { double value = (input_data[i].precision == MzMLHandlerHelper::BinaryData::PRE_64) ? input_data[i].floats_64[n] : input_data[i].floats_32[n]; inp_chromatogram.getFloatDataArrays()[meta_float_array_index].push_back(value); } ++meta_float_array_index; } else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_INT) { if (n < input_data[i].size) { Int64 value = (input_data[i].precision == MzMLHandlerHelper::BinaryData::PRE_64) ? 
input_data[i].ints_64[n] : input_data[i].ints_32[n]; inp_chromatogram.getIntegerDataArrays()[meta_int_array_index].push_back(value); } ++meta_int_array_index; } else if (input_data[i].data_type == MzMLHandlerHelper::BinaryData::DT_STRING) { if (n < input_data[i].decoded_char.size()) { String value = input_data[i].decoded_char[n]; inp_chromatogram.getStringDataArrays()[meta_string_array_index].push_back(value); } ++meta_string_array_index; } } } } } } template <typename DataType> void writeBinaryDataArray(std::ostream& os, const PeakFileOptions& pf_options_, std::vector<DataType> data_to_encode, bool is32bit, String array_type) { String encoded_string; bool no_numpress = true; // Compute the array-type and the compression CV term String cv_term_type; String compression_term; String compression_term_no_np; MSNumpressCoder::NumpressConfig np_config; if (array_type == "mz") { cv_term_type = "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000514\" name=\"m/z array\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n"; compression_term = MzMLHandlerHelper::getCompressionTerm_(pf_options_, pf_options_.getNumpressConfigurationMassTime(), true); compression_term_no_np = MzMLHandlerHelper::getCompressionTerm_(pf_options_, pf_options_.getNumpressConfigurationMassTime(), false); np_config = pf_options_.getNumpressConfigurationMassTime(); } else if (array_type == "time") { cv_term_type = "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000595\" name=\"time array\" unitAccession=\"UO:0000010\" unitName=\"second\" unitCvRef=\"MS\" />\n"; compression_term = MzMLHandlerHelper::getCompressionTerm_(pf_options_, pf_options_.getNumpressConfigurationMassTime(), true); compression_term_no_np = MzMLHandlerHelper::getCompressionTerm_(pf_options_, pf_options_.getNumpressConfigurationMassTime(), false); np_config = pf_options_.getNumpressConfigurationMassTime(); } else if (array_type == "intensity") { cv_term_type = "\t\t\t\t\t\t<cvParam cvRef=\"MS\" 
accession=\"MS:1000515\" name=\"intensity array\" unitAccession=\"MS:1000131\" unitName=\"number of detector counts\" unitCvRef=\"MS\"/>\n"; compression_term = MzMLHandlerHelper::getCompressionTerm_(pf_options_, pf_options_.getNumpressConfigurationIntensity(), true); compression_term_no_np = MzMLHandlerHelper::getCompressionTerm_(pf_options_, pf_options_.getNumpressConfigurationIntensity(), false); np_config = pf_options_.getNumpressConfigurationIntensity(); } else { throw Exception::InvalidValue(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Unknown array type", array_type); } // Try numpress encoding (if it is enabled) and fall back to regular encoding if it fails if (np_config.np_compression != MSNumpressCoder::NONE) { MSNumpressCoder().encodeNP(data_to_encode, encoded_string, pf_options_.getCompression(), np_config); if (!encoded_string.empty()) { // numpress succeeded no_numpress = false; os << "\t\t\t\t\t<binaryDataArray encodedLength=\"" << encoded_string.size() << "\">\n"; os << cv_term_type; os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000523\" name=\"64-bit float\" />\n"; } } // Regular DataArray without numpress (either 32 or 64 bit encoded) if (is32bit && no_numpress) { compression_term = compression_term_no_np; // select the no-numpress term decoder_.encode(data_to_encode, Base64::BYTEORDER_LITTLEENDIAN, encoded_string, pf_options_.getCompression()); os << "\t\t\t\t\t<binaryDataArray encodedLength=\"" << encoded_string.size() << "\">\n"; os << cv_term_type; os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000521\" name=\"32-bit float\" />\n"; } else if (!is32bit && no_numpress) { compression_term = compression_term_no_np; // select the no-numpress term decoder_.encode(data_to_encode, Base64::BYTEORDER_LITTLEENDIAN, encoded_string, pf_options_.getCompression()); os << "\t\t\t\t\t<binaryDataArray encodedLength=\"" << encoded_string.size() << "\">\n"; os << cv_term_type; os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000523\" 
name=\"64-bit float\" />\n"; } os << "\t\t\t\t\t\t" << compression_term << "\n"; os << "\t\t\t\t\t\t<binary>" << encoded_string << "</binary>\n"; os << "\t\t\t\t\t</binaryDataArray>\n"; } void writeHeader_(std::ostream& os, const MapType& exp, std::vector<std::vector<DataProcessing> >& dps, Internal::MzMLValidator& validator); /// map pointer for reading MapType* exp_; /// map pointer for writing const MapType* cexp_; /// Options that can be set for loading/storing PeakFileOptions options_; /**@name temporary data structures to hold parsed data */ //@{ /// The current spectrum SpectrumType spec_; /// The current chromatogram ChromatogramType chromatogram_; /// The spectrum data (or chromatogram data) std::vector<BinaryData> data_; /// The default number of peaks in the current spectrum Size default_array_length_; /// Flag that indicates that we're inside a spectrum (in contrast to a chromatogram) bool in_spectrum_list_; /// Id of the current list. Used for referencing param group, source file, sample, software, ... String current_id_; /// The referencing param groups: id => array (accession, value) Map<String, std::vector<SemanticValidator::CVTerm> > ref_param_; /// The source files: id => SourceFile Map<String, SourceFile> source_files_; /// The sample list: id => Sample Map<String, Sample> samples_; /// The software list: id => Software Map<String, Software> software_; /// The data processing list: id => Instrument Map<String, Instrument> instruments_; /// The data processing list: id => Instrument Map<String, std::vector<DataProcessing> > processing_; /// id of the default data processing (used when no processing is defined) String default_processing_; /** @brief Data necessary to generate a single spectrum Small struct holds all data necessary to populate a spectrum at a later timepoint (since reading of the base64 data and generation of spectra can be done at distinct timepoints). 
*/ struct SpectrumData { std::vector<BinaryData> data; Size default_array_length; SpectrumType spectrum; bool skip_data; }; /// Vector of spectrum data stored for later parallel processing std::vector<SpectrumData> spectrum_data_; /** @brief Data necessary to generate a single chromatogram Small struct holds all data necessary to populate a chromatogram at a later timepoint (since reading of the base64 data and generation of chromatogram can be done at distinct timepoints). */ struct ChromatogramData { std::vector<BinaryData> data; Size default_array_length; ChromatogramType chromatogram; }; /// Vector of chromatogram data stored for later parallel processing std::vector<ChromatogramData> chromatogram_data_; //@} /**@name temporary data structures to hold written data */ //@{ std::vector<std::pair<std::string, long> > spectra_offsets; std::vector<std::pair<std::string, long> > chromatograms_offsets; //@} /// Decoder/Encoder for Base64-data in MzML Base64 decoder_; /// Progress logger const ProgressLogger& logger_; /// Consumer class to work on spectra Interfaces::IMSDataConsumer<MapType>* consumer_; /// Counting spectra and chromatograms UInt scan_count; UInt chromatogram_count; /// Flag that indicates whether this spectrum should be skipped (due to options) bool skip_chromatogram_; bool skip_spectrum_; // Remember whether the RT of the spectrum was set or not bool rt_set_; ///Controlled vocabulary (psi-ms from OpenMS/share/OpenMS/CV/psi-ms.obo) ControlledVocabulary cv_; CVMappings mapping_; //~ Internal::MzMLValidator validator_; ///Count of selected ions UInt selected_ion_count_; /* /// Fills the current spectrum with peaks and meta data void fillData_(); */ /// Fills the current chromatogram with data points and meta data void fillChromatogramData_(); /// Handles CV terms void handleCVParam_(const String& parent_parent_tag, const String& parent_tag, /* const String & cvref, */ const String& accession, const String& name, const String& value, const String& 
unit_accession = ""); /// Handles user terms void handleUserParam_(const String& parent_parent_tag, const String& parent_tag, const String& name, const String& type, const String& value); /// Writes user terms void writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, UInt indent, String path, Internal::MzMLValidator& validator) const; /// Looks up a child CV term of @p parent_accession with the name @p name. If no such term is found, an empty term is returned. ControlledVocabulary::CVTerm getChildWithName_(const String& parent_accession, const String& name) const; /// Helper method that writes a software void writeSoftware_(std::ostream& os, const String& id, const Software& software, Internal::MzMLValidator& validator); /// Helper method that writes a source file void writeSourceFile_(std::ostream& os, const String& id, const SourceFile& software, Internal::MzMLValidator& validator); /// Helper method that writes a data processing list void writeDataProcessing_(std::ostream& os, const String& id, const std::vector<DataProcessing>& dps, Internal::MzMLValidator& validator); /// Helper method that write precursor information from spectra and chromatograms void writePrecursor_(std::ostream& os, const Precursor& precursor, Internal::MzMLValidator& validator); /// Helper method that write precursor information from spectra and chromatograms void writeProduct_(std::ostream& os, const Product& product, Internal::MzMLValidator& validator); /// Helper method to write an CV based on a meta value String writeCV_(const ControlledVocabulary::CVTerm& c, const DataValue& metaValue) const; /// Helper method to validate if the given CV is allowed in the current location (path) bool validateCV_(const ControlledVocabulary::CVTerm& c, const String& path, const Internal::MzMLValidator& validator) const; }; //-------------------------------------------------------------------------------- template <typename MapType> void MzMLHandler<MapType>::characters(const XMLCh* const 
    chars, const XMLSize_t length)
    {
      // Ignore character data while the current spectrum/chromatogram is being
      // skipped (e.g. filtered out via PeakFileOptions).
      if (skip_spectrum_ || skip_chromatogram_) return;

      String& current_tag = open_tags_.back();

      if (current_tag == "binary")
      {
        // Since we convert a Base64 string here, it can only contain plain ASCII
        sm_.appendASCII(chars, length, data_.back().base64);
      }
      else if (current_tag == "offset" || current_tag == "indexListOffset" || current_tag == "fileChecksum")
      {
        //do nothing for
        // - index
        // - checksum
        // - binary chromatogram data
      }
      else
      {
        // Unexpected character content: warn unless it is whitespace only.
        char* transcoded_chars = sm_.convert(chars);
        String transcoded_chars2 = transcoded_chars;
        transcoded_chars2.trim();
        if (transcoded_chars2 != "") warning(LOAD, String("Unhandled character content in tag '") + current_tag + "': " + transcoded_chars2);
      }
    }

    // SAX callback for opening tags: dispatches on the tag name and fills the
    // temporary parsing members (spec_, chromatogram_, data_, source_files_, ...).
    template <typename MapType>
    void MzMLHandler<MapType>::startElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname, const xercesc::Attributes& attributes)
    {
      // Attribute names are transcoded once per process (function-local statics)
      // instead of on every element.
      static const XMLCh* s_count = xercesc::XMLString::transcode("count");
      static const XMLCh* s_default_array_length = xercesc::XMLString::transcode("defaultArrayLength");
      static const XMLCh* s_array_length = xercesc::XMLString::transcode("arrayLength");
      static const XMLCh* s_accession = xercesc::XMLString::transcode("accession");
      static const XMLCh* s_name = xercesc::XMLString::transcode("name");
      static const XMLCh* s_type = xercesc::XMLString::transcode("type");
      static const XMLCh* s_value = xercesc::XMLString::transcode("value");
      static const XMLCh* s_unit_accession = xercesc::XMLString::transcode("unitAccession");
      static const XMLCh* s_id = xercesc::XMLString::transcode("id");
      static const XMLCh* s_spot_id = xercesc::XMLString::transcode("spotID");
      //~ static const XMLCh * s_cvref = xercesc::XMLString::transcode("cvRef"); TODO
      static const XMLCh* s_ref = xercesc::XMLString::transcode("ref");
      static const XMLCh* s_version = xercesc::XMLString::transcode("version");
      static const XMLCh* s_order = xercesc::XMLString::transcode("order");
      static const XMLCh*
s_location = xercesc::XMLString::transcode("location"); static const XMLCh* s_sample_ref = xercesc::XMLString::transcode("sampleRef"); static const XMLCh* s_software_ref = xercesc::XMLString::transcode("softwareRef"); static const XMLCh* s_source_file_ref = xercesc::XMLString::transcode("sourceFileRef"); static const XMLCh* s_default_instrument_configuration_ref = xercesc::XMLString::transcode("defaultInstrumentConfigurationRef"); static const XMLCh* s_instrument_configuration_ref = xercesc::XMLString::transcode("instrumentConfigurationRef"); static const XMLCh* s_default_data_processing_ref = xercesc::XMLString::transcode("defaultDataProcessingRef"); static const XMLCh* s_data_processing_ref = xercesc::XMLString::transcode("dataProcessingRef"); static const XMLCh* s_start_time_stamp = xercesc::XMLString::transcode("startTimeStamp"); static const XMLCh* s_external_spectrum_id = xercesc::XMLString::transcode("externalSpectrumID"); static const XMLCh* s_default_source_file_ref = xercesc::XMLString::transcode("defaultSourceFileRef"); static const XMLCh* s_scan_settings_ref = xercesc::XMLString::transcode("scanSettingsRef"); String tag = sm_.convert(qname); open_tags_.push_back(tag); //determine parent tag String parent_tag; if (open_tags_.size() > 1) parent_tag = *(open_tags_.end() - 2); String parent_parent_tag; if (open_tags_.size() > 2) parent_parent_tag = *(open_tags_.end() - 3); //do nothing until a new spectrum is reached if (tag != "spectrum" && skip_spectrum_) return; if (tag != "chromatogram" && skip_chromatogram_) return; if (tag == "spectrum") { //number of peaks spec_ = SpectrumType(); default_array_length_ = attributeAsInt_(attributes, s_default_array_length); //spectrum source file String source_file_ref; if (optionalAttributeAsString_(source_file_ref, attributes, s_source_file_ref)) { spec_.setSourceFile(source_files_[source_file_ref]); } //native id spec_.setNativeID(attributeAsString_(attributes, s_id)); //maldi spot id String maldi_spot_id; if 
(optionalAttributeAsString_(maldi_spot_id, attributes, s_spot_id)) { spec_.setMetaValue("maldi_spot_id", maldi_spot_id); } //data processing String data_processing_ref; if (optionalAttributeAsString_(data_processing_ref, attributes, s_data_processing_ref)) { spec_.setDataProcessing(processing_[data_processing_ref]); } else { spec_.setDataProcessing(processing_[default_processing_]); } } else if (tag == "chromatogram") { chromatogram_ = ChromatogramType(); default_array_length_ = attributeAsInt_(attributes, s_default_array_length); String source_file_ref; if (optionalAttributeAsString_(source_file_ref, attributes, s_source_file_ref)) { chromatogram_.setSourceFile(source_files_[source_file_ref]); } // native id chromatogram_.setNativeID(attributeAsString_(attributes, s_id)); // data processing String data_processing_ref; if (optionalAttributeAsString_(data_processing_ref, attributes, s_data_processing_ref)) { chromatogram_.setDataProcessing(processing_[data_processing_ref]); } else { chromatogram_.setDataProcessing(processing_[default_processing_]); } } else if (tag == "spectrumList") { //default data processing default_processing_ = attributeAsString_(attributes, s_default_data_processing_ref); //Abort if we need meta data only if (options_.getMetadataOnly()) throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__); UInt count = attributeAsInt_(attributes, s_count); exp_->reserveSpaceSpectra(count); logger_.startProgress(0, count, "loading spectra list"); in_spectrum_list_ = true; } else if (tag == "chromatogramList") { // default data processing default_processing_ = attributeAsString_(attributes, s_default_data_processing_ref); //Abort if we need meta data only if (options_.getMetadataOnly()) throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__); UInt count = attributeAsInt_(attributes, s_count); exp_->reserveSpaceChromatograms(count); logger_.startProgress(0, count, "loading chromatogram list"); in_spectrum_list_ = false; } else if (tag == 
"binaryDataArrayList" /* && in_spectrum_list_*/) { data_.reserve(attributeAsInt_(attributes, s_count)); } else if (tag == "binaryDataArray" /* && in_spectrum_list_*/) { data_.push_back(BinaryData()); data_.back().np_compression = MSNumpressCoder::NONE; // ensure that numpress compression is initially set to none ... data_.back().compression = false; // ensure that zlib compression is initially set to none ... //array length Int array_length = (Int) default_array_length_; optionalAttributeAsInt_(array_length, attributes, s_array_length); data_.back().size = array_length; //data processing String data_processing_ref; if (optionalAttributeAsString_(data_processing_ref, attributes, s_data_processing_ref)) { data_.back().meta.setDataProcessing(processing_[data_processing_ref]); } } else if (tag == "cvParam") { String value = ""; optionalAttributeAsString_(value, attributes, s_value); String unit_accession = ""; optionalAttributeAsString_(unit_accession, attributes, s_unit_accession); handleCVParam_(parent_parent_tag, parent_tag, /* attributeAsString_(attributes, s_cvref), */ attributeAsString_(attributes, s_accession), attributeAsString_(attributes, s_name), value, unit_accession); } else if (tag == "userParam") { String type = ""; optionalAttributeAsString_(type, attributes, s_type); String value = ""; optionalAttributeAsString_(value, attributes, s_value); handleUserParam_(parent_parent_tag, parent_tag, attributeAsString_(attributes, s_name), type, value); } else if (tag == "referenceableParamGroup") { current_id_ = attributeAsString_(attributes, s_id); } else if (tag == "sourceFile") { current_id_ = attributeAsString_(attributes, s_id); source_files_[current_id_].setNameOfFile(attributeAsString_(attributes, s_name)); source_files_[current_id_].setPathToFile(attributeAsString_(attributes, s_location)); } else if (tag == "referenceableParamGroupRef") { //call handleCVParam_ with the parent tag for each parameter in the group String ref = attributeAsString_(attributes, 
s_ref); for (Size i = 0; i < ref_param_[ref].size(); ++i) { handleCVParam_(parent_parent_tag, parent_tag, /* attributeAsString_(attributes, s_cvref), */ ref_param_[ref][i].accession, ref_param_[ref][i].name, ref_param_[ref][i].value, ref_param_[ref][i].unit_accession); } } else if (tag == "scan") { Acquisition tmp; //source file => meta data String source_file_ref; if (optionalAttributeAsString_(source_file_ref, attributes, s_source_file_ref)) { tmp.setMetaValue("source_file_name", source_files_[source_file_ref].getNameOfFile()); tmp.setMetaValue("source_file_path", source_files_[source_file_ref].getPathToFile()); } //external spectrum id => meta data String external_spectrum_id; if (optionalAttributeAsString_(external_spectrum_id, attributes, s_external_spectrum_id)) { tmp.setIdentifier(external_spectrum_id); } //spectrumRef - not really needed //instrumentConfigurationRef - not really needed: why should a scan have a different instrument? String instrument_configuration_ref; if (optionalAttributeAsString_(instrument_configuration_ref, attributes, s_instrument_configuration_ref)) { warning(LOAD, "Unhandled attribute 'instrumentConfigurationRef' in 'scan' tag."); } spec_.getAcquisitionInfo().push_back(tmp); } else if (tag == "mzML") { scan_count = 0; chromatogram_count = 0; //check file version against schema version String file_version = attributeAsString_(attributes, s_version); VersionInfo::VersionDetails current_version = VersionInfo::VersionDetails::create(file_version); static VersionInfo::VersionDetails mzML_min_version = VersionInfo::VersionDetails::create("1.1.0"); if (current_version == VersionInfo::VersionDetails::EMPTY) { warning(LOAD, String("Invalid mzML version string '") + file_version + "'. Assuming mzML version " + version_ + "!"); } else { if (current_version < mzML_min_version) { fatalError(LOAD, String("Only mzML 1.1.0 or higher is supported! 
This file has version '") + file_version + "'."); } else if (current_version > VersionInfo::VersionDetails::create(version_)) { warning(LOAD, "The mzML file version (" + file_version + ") is newer than the parser version (" + version_ + "). This might lead to undefined behavior."); } } //handle file accession String accession; if (optionalAttributeAsString_(accession, attributes, s_accession)) { exp_->setIdentifier(accession); } //handle file id String id; if (optionalAttributeAsString_(id, attributes, s_id)) { exp_->setMetaValue("mzml_id", id); } } else if (tag == "contact") { exp_->getContacts().push_back(ContactPerson()); } else if (tag == "sample") { current_id_ = attributeAsString_(attributes, s_id); String name; if (optionalAttributeAsString_(name, attributes, s_name)) { samples_[current_id_].setName(name); } } else if (tag == "run") { //sample String sample_ref; if (optionalAttributeAsString_(sample_ref, attributes, s_sample_ref)) { exp_->setSample(samples_[sample_ref]); } //instrument String instrument_ref = attributeAsString_(attributes, s_default_instrument_configuration_ref); exp_->setInstrument(instruments_[instrument_ref]); //start time String start_time; if (optionalAttributeAsString_(start_time, attributes, s_start_time_stamp)) { exp_->setDateTime(asDateTime_(start_time)); } //defaultSourceFileRef String default_source_file_ref; if (optionalAttributeAsString_(default_source_file_ref, attributes, s_default_source_file_ref)) { exp_->getSourceFiles().push_back(source_files_[default_source_file_ref]); } } else if (tag == "software") { current_id_ = attributeAsString_(attributes, s_id); software_[current_id_].setVersion(attributeAsString_(attributes, s_version)); } else if (tag == "dataProcessing") { current_id_ = attributeAsString_(attributes, s_id); } else if (tag == "processingMethod") { DataProcessing dp; // See ticket 452: Do NOT remove this try/catch block until foreign // software (e.g. ProteoWizard msconvert.exe) produces valid mzML. 
try { dp.setSoftware(software_[attributeAsString_(attributes, s_software_ref)]); } catch (Exception::ParseError& /*e*/) { LOG_ERROR << "Warning: Parsing error, \"processingMethod\" is missing the required attribute \"softwareRef\".\n" << "The software tool which generated this mzML should be fixed. Please notify the maintainers." << std::endl; } processing_[current_id_].push_back(dp); //The order of processing methods is currently ignored } else if (tag == "instrumentConfiguration") { current_id_ = attributeAsString_(attributes, s_id); //scan settings String scan_settings_ref; if (optionalAttributeAsString_(scan_settings_ref, attributes, s_scan_settings_ref)) { warning(LOAD, "Unhandled attribute 'scanSettingsRef' in 'instrumentConfiguration' tag."); } } else if (tag == "softwareRef") { //Set the software of the instrument instruments_[current_id_].setSoftware(software_[attributeAsString_(attributes, s_ref)]); } else if (tag == "source") { instruments_[current_id_].getIonSources().push_back(IonSource()); instruments_[current_id_].getIonSources().back().setOrder(attributeAsInt_(attributes, s_order)); } else if (tag == "analyzer") { instruments_[current_id_].getMassAnalyzers().push_back(MassAnalyzer()); instruments_[current_id_].getMassAnalyzers().back().setOrder(attributeAsInt_(attributes, s_order)); } else if (tag == "detector") { instruments_[current_id_].getIonDetectors().push_back(IonDetector()); instruments_[current_id_].getIonDetectors().back().setOrder(attributeAsInt_(attributes, s_order)); } else if (tag == "precursor") { if (in_spectrum_list_) { //initialize spec_.getPrecursors().push_back(Precursor()); //source file => meta data String source_file_ref; if (optionalAttributeAsString_(source_file_ref, attributes, s_source_file_ref)) { spec_.getPrecursors().back().setMetaValue("source_file_name", source_files_[source_file_ref].getNameOfFile()); spec_.getPrecursors().back().setMetaValue("source_file_path", source_files_[source_file_ref].getPathToFile()); } 
//external spectrum id => meta data String external_spectrum_id; if (optionalAttributeAsString_(external_spectrum_id, attributes, s_external_spectrum_id)) { spec_.getPrecursors().back().setMetaValue("external_spectrum_id", external_spectrum_id); } //reset selected ion count selected_ion_count_ = 0; } else { chromatogram_.setPrecursor(Precursor()); String source_file_ref; if (optionalAttributeAsString_(source_file_ref, attributes, s_source_file_ref)) { chromatogram_.getPrecursor().setMetaValue("source_file_name", source_files_[source_file_ref].getNameOfFile()); chromatogram_.getPrecursor().setMetaValue("source_file_path", source_files_[source_file_ref].getPathToFile()); } String external_spectrum_id; if (optionalAttributeAsString_(external_spectrum_id, attributes, s_external_spectrum_id)) { chromatogram_.getPrecursor().setMetaValue("external_spectrum_id", external_spectrum_id); } selected_ion_count_ = 0; } } else if (tag == "product") { //initialize if (in_spectrum_list_) { spec_.getProducts().push_back(Product()); } else { chromatogram_.setProduct(Product()); } } else if (tag == "selectedIon") { //increase selected ion count ++selected_ion_count_; } else if (tag == "selectedIonList") { //Warn if more than one selected ion is present if (attributeAsInt_(attributes, s_count) > 1) { warning(LOAD, "OpenMS can currently handle only one selection ion per precursor! 
Only the first ion is loaded!"); } } else if (tag == "scanWindow") { spec_.getInstrumentSettings().getScanWindows().push_back(ScanWindow()); } } template <typename MapType> void MzMLHandler<MapType>::endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname) { static const XMLCh* s_spectrum = xercesc::XMLString::transcode("spectrum"); static const XMLCh* s_chromatogram = xercesc::XMLString::transcode("chromatogram"); static const XMLCh* s_spectrum_list = xercesc::XMLString::transcode("spectrumList"); static const XMLCh* s_chromatogram_list = xercesc::XMLString::transcode("chromatogramList"); static const XMLCh* s_mzml = xercesc::XMLString::transcode("mzML"); open_tags_.pop_back(); if (equal_(qname, s_spectrum)) { // catch errors stemming from confusion about elution time and scan time if (!rt_set_ && spec_.metaValueExists("elution time (seconds)")) { spec_.setRT(spec_.getMetaValue("elution time (seconds)")); } /* this is too hot (could be SRM as well? 
       -- check!):
      // correct spectrum type if possible (i.e., make it more specific)
      if (spec_.getInstrumentSettings().getScanMode() == InstrumentSettings::MASSSPECTRUM)
      {
        if (spec_.getMSLevel() <= 1) spec_.getInstrumentSettings().setScanMode(InstrumentSettings::MS1SPECTRUM);
        else spec_.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM);
      }
      */
      if (!skip_spectrum_)
      {
        // Queue the finished spectrum (and, if data filling is enabled, its
        // still-encoded binary arrays) for later decoding -- see SpectrumData.
        spectrum_data_.push_back(SpectrumData());
        spectrum_data_.back().default_array_length = default_array_length_;
        spectrum_data_.back().spectrum = spec_;
        if (options_.getFillData())
        {
          spectrum_data_.back().data = data_;
        }
      }
      // Flush the pool once it reaches the configured size limit.
      if (spectrum_data_.size() >= options_.getMaxDataPoolSize())
      {
        populateSpectraWithData();
      }
      // Reset per-spectrum parsing state.
      skip_spectrum_ = false;
      rt_set_ = false;
      if (options_.getSizeOnly()) {skip_spectrum_ = true; }
      logger_.setProgress(++scan_count);
      data_.clear();
      default_array_length_ = 0;
    }
    else if (equal_(qname, s_chromatogram))
    {
      if (!skip_chromatogram_)
      {
        // Same pooling scheme as for spectra -- see ChromatogramData.
        chromatogram_data_.push_back(ChromatogramData());
        chromatogram_data_.back().default_array_length = default_array_length_;
        chromatogram_data_.back().chromatogram = chromatogram_;
        if (options_.getFillData())
        {
          chromatogram_data_.back().data = data_;
        }
      }
      if (chromatogram_data_.size() >= options_.getMaxDataPoolSize())
      {
        populateChromatogramsWithData();
      }
      // Reset per-chromatogram parsing state.
      skip_chromatogram_ = false;
      if (options_.getSizeOnly()) {skip_chromatogram_ = true; }
      logger_.setProgress(++chromatogram_count);
      data_.clear();
      default_array_length_ = 0;
    }
    else if (equal_(qname, s_spectrum_list))
    {
      in_spectrum_list_ = false;
      logger_.endProgress();
    }
    else if (equal_(qname, s_chromatogram_list))
    {
      in_spectrum_list_ = false;
      logger_.endProgress();
    }
    else if (equal_(qname, s_mzml))
    {
      // End of document: drop all id -> object lookup tables used while parsing.
      ref_param_.clear();
      current_id_ = "";
      source_files_.clear();
      samples_.clear();
      software_.clear();
      instruments_.clear();
      processing_.clear();
      // Flush the remaining data
      populateSpectraWithData();
      populateChromatogramsWithData();
    }
    sm_.clear();
  }

  template <typename MapType>
  void
MzMLHandler<MapType>::handleCVParam_(const String& parent_parent_tag, const String& parent_tag, /* const String & cvref, */ const String& accession, const String& name, const String& value, const String& unit_accession) { // the actual value stored in the CVParam // we assume for now that it is a string value, we update the type later on DataValue termValue = value; //Abort on unknown terms if (!cv_.exists(accession)) { //in 'sample' several external CVs are used (Brenda, GO, ...). Do not warn then. if (parent_tag != "sample") { warning(LOAD, String("Unknown cvParam '") + accession + "' in tag '" + parent_tag + "'."); return; } } else { const ControlledVocabulary::CVTerm& term = cv_.getTerm(accession); //obsolete CV terms if (term.obsolete) { warning(LOAD, String("Obsolete CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "'."); } //check if term name and parsed name match String parsed_name = name; parsed_name.trim(); String correct_name = term.name; correct_name.trim(); if (parsed_name != correct_name) { warning(LOAD, String("Name of CV term not correct: '") + term.id + " - " + parsed_name + "' should be '" + correct_name + "'"); } if (term.obsolete) { warning(LOAD, String("Obsolete CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "'."); } //values used in wrong places and wrong value types if (value != "") { if (term.xref_type == ControlledVocabulary::CVTerm::NONE) { //Quality CV does not state value type :( if (!accession.hasPrefix("PATO:")) { warning(LOAD, String("The CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "' must not have a value. 
The value is '" + value + "'."); } } else { switch (term.xref_type) { //string value can be anything case ControlledVocabulary::CVTerm::XSD_STRING: break; //int value => try casting case ControlledVocabulary::CVTerm::XSD_INTEGER: case ControlledVocabulary::CVTerm::XSD_NEGATIVE_INTEGER: case ControlledVocabulary::CVTerm::XSD_POSITIVE_INTEGER: case ControlledVocabulary::CVTerm::XSD_NON_NEGATIVE_INTEGER: case ControlledVocabulary::CVTerm::XSD_NON_POSITIVE_INTEGER: try { termValue = value.toInt(); } catch (Exception::ConversionError&) { warning(LOAD, String("The CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "' must have an integer value. The value is '" + value + "'."); return; } break; //double value => try casting case ControlledVocabulary::CVTerm::XSD_DECIMAL: try { termValue = value.toDouble(); } catch (Exception::ConversionError&) { warning(LOAD, String("The CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "' must have a floating-point value. The value is '" + value + "'."); return; } break; //date string => try conversion case ControlledVocabulary::CVTerm::XSD_DATE: try { DateTime tmp; tmp.set(value); } catch (Exception::ParseError&) { warning(LOAD, String("The CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "' must be a valid date. The value is '" + value + "'."); return; } break; default: warning(LOAD, String("The CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "' has the unknown value type '" + ControlledVocabulary::CVTerm::getXRefTypeName(term.xref_type) + "'."); break; } } } //no value, although there should be a numerical value else if (term.xref_type != ControlledVocabulary::CVTerm::NONE && term.xref_type != ControlledVocabulary::CVTerm::XSD_STRING) { warning(LOAD, String("The CV term '") + accession + " - " + term.name + "' used in tag '" + parent_tag + "' should have a numerical value. 
The value is '" + value + "'."); return; } } if (unit_accession != "") termValue.setUnit(unit_accession); //------------------------- run ---------------------------- if (parent_tag == "run") { //MS:1000857 ! run attribute if (accession == "MS:1000858") //fraction identifier { exp_->setFractionIdentifier(value); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- binaryDataArray ---------------------------- else if (parent_tag == "binaryDataArray") { if (!MzMLHandlerHelper::handleBinaryDataArrayCVParam(data_, accession, value, name)) { if (cv_.isChildOf(accession, "MS:1000513")) //other array names as string { data_.back().meta.setName(cv_.getTerm(accession).name); } else { warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } } } //------------------------- spectrum ---------------------------- else if (parent_tag == "spectrum") { //spectrum type if (accession == "MS:1000294") //mass spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM); } else if (accession == "MS:1000579") //MS1 spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::MS1SPECTRUM); } else if (accession == "MS:1000580") //MSn spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM); } else if (accession == "MS:1000581") //CRM spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::CRM); } else if (accession == "MS:1000582") //SIM spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::SIM); } else if (accession == "MS:1000583") //SRM spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::SRM); } else if (accession == "MS:1000804") //electromagnetic radiation spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::EMR); } else if (accession == "MS:1000805") //emission spectrum { 
spec_.getInstrumentSettings().setScanMode(InstrumentSettings::EMISSION); } else if (accession == "MS:1000806") //absorption spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::ABSORBTION); } else if (accession == "MS:1000325") //constant neutral gain spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::CNG); } else if (accession == "MS:1000326") //constant neutral loss spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::CNL); } else if (accession == "MS:1000341") //precursor ion spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::PRECURSOR); } else if (accession == "MS:1000789") //enhanced multiply charged spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::EMC); } else if (accession == "MS:1000790") //time-delayed fragmentation spectrum { spec_.getInstrumentSettings().setScanMode(InstrumentSettings::TDF); } //spectrum representation else if (accession == "MS:1000127") //centroid spectrum { spec_.setType(SpectrumSettings::PEAKS); } else if (accession == "MS:1000128") //profile spectrum { spec_.setType(SpectrumSettings::RAWDATA); } else if (accession == "MS:1000525") //spectrum representation { spec_.setType(SpectrumSettings::UNKNOWN); } //spectrum attribute else if (accession == "MS:1000511") //ms level { spec_.setMSLevel(value.toInt()); if (options_.hasMSLevels() && !options_.containsMSLevel(spec_.getMSLevel())) { skip_spectrum_ = true; } } else if (accession == "MS:1000497") //zoom scan { spec_.getInstrumentSettings().setZoomScan(true); } else if (accession == "MS:1000285") //total ion current { //No member => meta data spec_.setMetaValue("total ion current", termValue); } else if (accession == "MS:1000504") //base peak m/z { //No member => meta data spec_.setMetaValue("base peak m/z", termValue); } else if (accession == "MS:1000505") //base peak intensity { //No member => meta data spec_.setMetaValue("base peak intensity", termValue); } else if (accession == 
"MS:1000527") //highest observed m/z { //No member => meta data spec_.setMetaValue("highest observed m/z", termValue); } else if (accession == "MS:1000528") //lowest observed m/z { //No member => meta data spec_.setMetaValue("lowest observed m/z", termValue); } else if (accession == "MS:1000618") //highest observed wavelength { //No member => meta data spec_.setMetaValue("highest observed wavelength", termValue); } else if (accession == "MS:1000619") //lowest observed wavelength { //No member => meta data spec_.setMetaValue("lowest observed wavelength", termValue); } else if (accession == "MS:1000796") //spectrum title { //No member => meta data spec_.setMetaValue("spectrum title", termValue); } else if (accession == "MS:1000797") //peak list scans { //No member => meta data spec_.setMetaValue("peak list scans", termValue); } else if (accession == "MS:1000798") //peak list raw scans { //No member => meta data spec_.setMetaValue("peak list raw scans", termValue); } //scan polarity else if (accession == "MS:1000129") //negative scan { spec_.getInstrumentSettings().setPolarity(IonSource::NEGATIVE); } else if (accession == "MS:1000130") //positive scan { spec_.getInstrumentSettings().setPolarity(IonSource::POSITIVE); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- scanWindow ---------------------------- else if (parent_tag == "scanWindow") { if (accession == "MS:1000501") //scan window lower limit { spec_.getInstrumentSettings().getScanWindows().back().begin = value.toDouble(); } else if (accession == "MS:1000500") //scan window upper limit { spec_.getInstrumentSettings().getScanWindows().back().end = value.toDouble(); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- referenceableParamGroup ---------------------------- else if (parent_tag == "referenceableParamGroup") { SemanticValidator::CVTerm term; 
term.accession = accession; term.name = name; term.value = value; term.unit_accession = unit_accession; ref_param_[current_id_].push_back(term); } //------------------------- selectedIon ---------------------------- else if (parent_tag == "selectedIon") { //parse only the first selected ion if (selected_ion_count_ > 1) return; if (accession == "MS:1000744") //selected ion m/z { //this overwrites the m/z of the isolation window, as it is probably more accurate if (in_spectrum_list_) { spec_.getPrecursors().back().setMZ(value.toDouble()); } else { chromatogram_.getPrecursor().setMZ(value.toDouble()); } } else if (accession == "MS:1000041") //charge state { if (in_spectrum_list_) { spec_.getPrecursors().back().setCharge(value.toInt()); } else { chromatogram_.getPrecursor().setCharge(value.toInt()); } } else if (accession == "MS:1000042") //peak intensity { if (in_spectrum_list_) { spec_.getPrecursors().back().setIntensity(value.toDouble()); } else { chromatogram_.getPrecursor().setIntensity(value.toDouble()); } } else if (accession == "MS:1000633") //possible charge state { if (in_spectrum_list_) { spec_.getPrecursors().back().getPossibleChargeStates().push_back(value.toInt()); } else { chromatogram_.getPrecursor().getPossibleChargeStates().push_back(value.toInt()); } } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- activation ---------------------------- else if (parent_tag == "activation") { //precursor activation attribute if (in_spectrum_list_) { if (accession == "MS:1000245") //charge stripping { //No member => meta data spec_.getPrecursors().back().setMetaValue("charge stripping", String("true")); } else if (accession == "MS:1000045") //collision energy (ev) { //No member => meta data spec_.getPrecursors().back().setMetaValue("collision energy", termValue); } else if (accession == "MS:1000412") //buffer gas { //No member => meta data spec_.getPrecursors().back().setMetaValue("buffer 
gas", termValue); } else if (accession == "MS:1000419") //collision gas { //No member => meta data spec_.getPrecursors().back().setMetaValue("collision gas", termValue); } else if (accession == "MS:1000509") //activation energy (ev) { spec_.getPrecursors().back().setActivationEnergy(value.toDouble()); } else if (accession == "MS:1000138") //percent collision energy { //No member => meta data spec_.getPrecursors().back().setMetaValue("percent collision energy", termValue); } else if (accession == "MS:1000869") //collision gas pressure { //No member => meta data spec_.getPrecursors().back().setMetaValue("collision gas pressure", termValue); } //dissociation method else if (accession == "MS:1000044") //dissociation method { //nothing to do here } else if (accession == "MS:1000133") //collision-induced dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::CID); } else if (accession == "MS:1000134") //plasma desorption { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::PD); } else if (accession == "MS:1000135") //post-source decay { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::PSD); } else if (accession == "MS:1000136") //surface-induced dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::SID); } else if (accession == "MS:1000242") //blackbody infrared radiative dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::BIRD); } else if (accession == "MS:1000250") //electron capture dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::ECD); } else if (accession == "MS:1000262") //infrared multiphoton dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::IMD); } else if (accession == "MS:1000282") //sustained off-resonance irradiation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::SORI); } else if (accession == "MS:1000422") //high-energy 
collision-induced dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::HCID); } else if (accession == "MS:1000433") //low-energy collision-induced dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::LCID); } else if (accession == "MS:1000435") //photodissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::PHD); } else if (accession == "MS:1000598") //electron transfer dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::ETD); } else if (accession == "MS:1000599") //pulsed q dissociation { spec_.getPrecursors().back().getActivationMethods().insert(Precursor::PQD); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else { if (accession == "MS:1000245") //charge stripping { //No member => meta data chromatogram_.getPrecursor().setMetaValue("charge stripping", String("true")); } else if (accession == "MS:1000045") //collision energy (ev) { //No member => meta data chromatogram_.getPrecursor().setMetaValue("collision energy", termValue); } else if (accession == "MS:1000412") //buffer gas { //No member => meta data chromatogram_.getPrecursor().setMetaValue("buffer gas", termValue); } else if (accession == "MS:1000419") //collision gas { //No member => meta data chromatogram_.getPrecursor().setMetaValue("collision gas", termValue); } else if (accession == "MS:1000509") //activation energy (ev) { chromatogram_.getPrecursor().setActivationEnergy(value.toDouble()); } else if (accession == "MS:1000138") //percent collision energy { //No member => meta data chromatogram_.getPrecursor().setMetaValue("percent collision energy", termValue); } else if (accession == "MS:1000869") //collision gas pressure { //No member => meta data chromatogram_.getPrecursor().setMetaValue("collision gas pressure", termValue); } //dissociation method else if (accession == "MS:1000044") //dissociation method { //nothing to do 
here } else if (accession == "MS:1000133") //collision-induced dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::CID); } else if (accession == "MS:1000134") //plasma desorption { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::PD); } else if (accession == "MS:1000135") //post-source decay { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::PSD); } else if (accession == "MS:1000136") //surface-induced dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::SID); } else if (accession == "MS:1000242") //blackbody infrared radiative dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::BIRD); } else if (accession == "MS:1000250") //electron capture dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::ECD); } else if (accession == "MS:1000262") //infrared multiphoton dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::IMD); } else if (accession == "MS:1000282") //sustained off-resonance irradiation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::SORI); } else if (accession == "MS:1000422") //high-energy collision-induced dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::HCID); } else if (accession == "MS:1000433") //low-energy collision-induced dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::LCID); } else if (accession == "MS:1000435") //photodissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::PHD); } else if (accession == "MS:1000598") //electron transfer dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::ETD); } else if (accession == "MS:1000599") //pulsed q dissociation { chromatogram_.getPrecursor().getActivationMethods().insert(Precursor::PQD); } else warning(LOAD, String("Unhandled cvParam '") + accession 
+ "' in tag '" + parent_tag + "'."); } } //------------------------- isolationWindow ---------------------------- else if (parent_tag == "isolationWindow") { if (parent_parent_tag == "precursor") { if (accession == "MS:1000827") //isolation window target m/z { if (in_spectrum_list_) { spec_.getPrecursors().back().setMZ(value.toDouble()); } else { chromatogram_.getPrecursor().setMZ(value.toDouble()); } } else if (accession == "MS:1000828") //isolation window lower offset { if (in_spectrum_list_) { spec_.getPrecursors().back().setIsolationWindowLowerOffset(value.toDouble()); } else { chromatogram_.getPrecursor().setIsolationWindowLowerOffset(value.toDouble()); } } else if (accession == "MS:1000829") //isolation window upper offset { if (in_spectrum_list_) { spec_.getPrecursors().back().setIsolationWindowUpperOffset(value.toDouble()); } else { chromatogram_.getPrecursor().setIsolationWindowUpperOffset(value.toDouble()); } } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_parent_tag == "product") { if (accession == "MS:1000827") //isolation window target m/z { if (in_spectrum_list_) { spec_.getProducts().back().setMZ(value.toDouble()); } else { chromatogram_.getProduct().setMZ(value.toDouble()); } } else if (accession == "MS:1000829") //isolation window upper offset { if (in_spectrum_list_) { spec_.getProducts().back().setIsolationWindowUpperOffset(value.toDouble()); } else { chromatogram_.getProduct().setIsolationWindowUpperOffset(value.toDouble()); } } else if (accession == "MS:1000828") //isolation window lower offset { if (in_spectrum_list_) { spec_.getProducts().back().setIsolationWindowLowerOffset(value.toDouble()); } else { chromatogram_.getProduct().setIsolationWindowLowerOffset(value.toDouble()); } } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } } //------------------------- scanList ---------------------------- else if (parent_tag == 
"scanList") { if (cv_.isChildOf(accession, "MS:1000570")) //method of combination as string { spec_.getAcquisitionInfo().setMethodOfCombination(cv_.getTerm(accession).name); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- scan ---------------------------- else if (parent_tag == "scan") { //scan attributes if (accession == "MS:1000502") //dwell time { //No member => meta data spec_.setMetaValue("dwell time", termValue); } else if (accession == "MS:1000011") //mass resolution { //No member => meta data spec_.setMetaValue("mass resolution", termValue); } else if (accession == "MS:1000015") //scan rate { //No member => meta data spec_.setMetaValue("scan rate", termValue); } else if (accession == "MS:1000016") //scan start time { if (unit_accession == "UO:0000031") //minutes { spec_.setRT(60.0 * value.toDouble()); rt_set_ = true; } else //seconds { spec_.setRT(value.toDouble()); rt_set_ = true; } if (options_.hasRTRange() && !options_.getRTRange().encloses(DPosition<1>(spec_.getRT()))) { skip_spectrum_ = true; } } else if (accession == "MS:1000826") //elution time { if (unit_accession == "UO:0000031") //minutes { spec_.setMetaValue("elution time (seconds)", 60.0 * value.toDouble()); } else //seconds { spec_.setMetaValue("elution time (seconds)", value.toDouble()); } } else if (accession == "MS:1000512") //filter string { //No member => meta data spec_.setMetaValue("filter string", termValue); } else if (accession == "MS:1000803") //analyzer scan offset { //No member => meta data spec_.setMetaValue("analyzer scan offset", termValue); // used in SpectraIdentificationViewWidget() } else if (accession == "MS:1000616") //preset scan configuration { //No member => meta data spec_.setMetaValue("preset scan configuration", termValue); } else if (accession == "MS:1000800") //mass resolving power { //No member => meta data spec_.setMetaValue("mass resolving power", termValue); } else if (accession 
== "MS:1000880") //interchannel delay { //No member => meta data spec_.setMetaValue("interchannel delay", termValue); } //scan direction else if (accession == "MS:1000092") //decreasing m/z scan { //No member => meta data spec_.setMetaValue("scan direction", String("decreasing")); } else if (accession == "MS:1000093") //increasing m/z scan { //No member => meta data spec_.setMetaValue("scan direction", String("increasing")); } //scan law else if (accession == "MS:1000094") //scan law: exponential { //No member => meta data spec_.setMetaValue("scan law", String("exponential")); } else if (accession == "MS:1000095") //scan law: linear { //No member => meta data spec_.setMetaValue("scan law", String("linear")); } else if (accession == "MS:1000096") //scan law: quadratic { //No member => meta data spec_.setMetaValue("scan law", String("quadratic")); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- contact ---------------------------- else if (parent_tag == "contact") { if (accession == "MS:1000586") //contact name { exp_->getContacts().back().setName(value); } else if (accession == "MS:1000587") //contact address { exp_->getContacts().back().setAddress(value); } else if (accession == "MS:1000588") //contact URL { exp_->getContacts().back().setURL(value); } else if (accession == "MS:1000589") //contact email { exp_->getContacts().back().setEmail(value); } else if (accession == "MS:1000590") //contact organization { exp_->getContacts().back().setInstitution(value); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- sourceFile ---------------------------- else if (parent_tag == "sourceFile") { if (accession == "MS:1000569") //SHA-1 checksum { source_files_[current_id_].setChecksum(value, SourceFile::SHA1); } else if (accession == "MS:1000568") //MD5 checksum { source_files_[current_id_].setChecksum(value, 
SourceFile::MD5); } else if (cv_.isChildOf(accession, "MS:1000560")) //source file type as string { source_files_[current_id_].setFileType(cv_.getTerm(accession).name); } else if (cv_.isChildOf(accession, "MS:1000767")) //native spectrum identifier format as string { source_files_[current_id_].setNativeIDType(cv_.getTerm(accession).name); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- sample ---------------------------- else if (parent_tag == "sample") { if (accession == "MS:1000004") //sample mass (gram) { samples_[current_id_].setMass(value.toDouble()); } else if (accession == "MS:1000001") //sample number { samples_[current_id_].setNumber(value); } else if (accession == "MS:1000005") //sample volume (milliliter) { samples_[current_id_].setVolume(value.toDouble()); } else if (accession == "MS:1000006") //sample concentration (gram per liter) { samples_[current_id_].setConcentration(value.toDouble()); } else if (accession == "MS:1000053") //sample batch { //No member => meta data samples_[current_id_].setMetaValue("sample batch", termValue); } else if (accession == "MS:1000047") //emulsion { samples_[current_id_].setState(Sample::EMULSION); } else if (accession == "MS:1000048") //gas { samples_[current_id_].setState(Sample::GAS); } else if (accession == "MS:1000049") //liquid { samples_[current_id_].setState(Sample::LIQUID); } else if (accession == "MS:1000050") //solid { samples_[current_id_].setState(Sample::SOLID); } else if (accession == "MS:1000051") //solution { samples_[current_id_].setState(Sample::SOLUTION); } else if (accession == "MS:1000052") //suspension { samples_[current_id_].setState(Sample::SUSPENSION); } else if (accession.hasPrefix("PATO:")) //quality of an object { //No member => meta data samples_[current_id_].setMetaValue(String(name), termValue); } else if (accession.hasPrefix("GO:")) //cellular_component { //No member => meta data 
samples_[current_id_].setMetaValue("GO cellular component", String(name)); } else if (accession.hasPrefix("BTO:")) //brenda source tissue ontology { //No member => meta data samples_[current_id_].setMetaValue("brenda source tissue", String(name)); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //------------------------- instrumentConfiguration ---------------------------- else if (parent_tag == "instrumentConfiguration") { //instrument model if (accession == "MS:1000031") { //unknown instrument => nothing to do } else if (cv_.isChildOf(accession, "MS:1000031")) //instrument name as string { instruments_[current_id_].setName(cv_.getTerm(accession).name); } //instrument attribute else if (accession == "MS:1000529") //instrument serial number { //No member => meta data instruments_[current_id_].setMetaValue("instrument serial number", termValue); } else if (accession == "MS:1000032") //customization { instruments_[current_id_].setCustomizations(value); } else if (accession == "MS:1000236") //transmission { //No member => metadata instruments_[current_id_].setMetaValue("transmission", termValue); } //ion optics type else if (accession == "MS:1000246") //delayed extraction { instruments_[current_id_].setIonOptics(Instrument::DELAYED_EXTRACTION); } else if (accession == "MS:1000221") //magnetic deflection { instruments_[current_id_].setIonOptics(Instrument::MAGNETIC_DEFLECTION); } else if (accession == "MS:1000275") //collision quadrupole { instruments_[current_id_].setIonOptics(Instrument::COLLISION_QUADRUPOLE); } else if (accession == "MS:1000281") //selected ion flow tube { instruments_[current_id_].setIonOptics(Instrument::SELECTED_ION_FLOW_TUBE); } else if (accession == "MS:1000286") //time lag focusing { instruments_[current_id_].setIonOptics(Instrument::TIME_LAG_FOCUSING); } else if (accession == "MS:1000300") //reflectron { instruments_[current_id_].setIonOptics(Instrument::REFLECTRON); } else if (accession 
== "MS:1000307") //einzel lens { instruments_[current_id_].setIonOptics(Instrument::EINZEL_LENS); } else if (accession == "MS:1000309") //first stability region { instruments_[current_id_].setIonOptics(Instrument::FIRST_STABILITY_REGION); } else if (accession == "MS:1000310") //fringing field { instruments_[current_id_].setIonOptics(Instrument::FRINGING_FIELD); } else if (accession == "MS:1000311") //kinetic energy analyzer { instruments_[current_id_].setIonOptics(Instrument::KINETIC_ENERGY_ANALYZER); } else if (accession == "MS:1000320") //static field { instruments_[current_id_].setIonOptics(Instrument::STATIC_FIELD); } //ion optics attribute else if (accession == "MS:1000304") //accelerating voltage { //No member => metadata instruments_[current_id_].setMetaValue("accelerating voltage", termValue); } else if (accession == "MS:1000216") //field-free region { //No member => metadata instruments_[current_id_].setMetaValue("field-free region", String("true")); } else if (accession == "MS:1000308") //electric field strength { //No member => metadata instruments_[current_id_].setMetaValue("electric field strength", termValue); } else if (accession == "MS:1000319") //space charge effect { //No member => metadata instruments_[current_id_].setMetaValue("space charge effect", String("true")); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "source") { //inlet type if (accession == "MS:1000055") //continuous flow fast atom bombardment { instruments_[current_id_].getIonSources().back().setInletType(IonSource::CONTINUOUSFLOWFASTATOMBOMBARDMENT); } else if (accession == "MS:1000056") //direct inlet { instruments_[current_id_].getIonSources().back().setInletType(IonSource::DIRECT); } else if (accession == "MS:1000057") //electrospray inlet { instruments_[current_id_].getIonSources().back().setInletType(IonSource::ELECTROSPRAYINLET); } else if (accession == "MS:1000058") //flow injection analysis { 
instruments_[current_id_].getIonSources().back().setInletType(IonSource::FLOWINJECTIONANALYSIS); } else if (accession == "MS:1000059") //inductively coupled plasma { instruments_[current_id_].getIonSources().back().setInletType(IonSource::INDUCTIVELYCOUPLEDPLASMA); } else if (accession == "MS:1000060") //infusion { instruments_[current_id_].getIonSources().back().setInletType(IonSource::INFUSION); } else if (accession == "MS:1000061") //jet separator { instruments_[current_id_].getIonSources().back().setInletType(IonSource::JETSEPARATOR); } else if (accession == "MS:1000062") //membrane separator { instruments_[current_id_].getIonSources().back().setInletType(IonSource::MEMBRANESEPARATOR); } else if (accession == "MS:1000063") //moving belt { instruments_[current_id_].getIonSources().back().setInletType(IonSource::MOVINGBELT); } else if (accession == "MS:1000064") //moving wire { instruments_[current_id_].getIonSources().back().setInletType(IonSource::MOVINGWIRE); } else if (accession == "MS:1000065") //open split { instruments_[current_id_].getIonSources().back().setInletType(IonSource::OPENSPLIT); } else if (accession == "MS:1000066") //particle beam { instruments_[current_id_].getIonSources().back().setInletType(IonSource::PARTICLEBEAM); } else if (accession == "MS:1000067") //reservoir { instruments_[current_id_].getIonSources().back().setInletType(IonSource::RESERVOIR); } else if (accession == "MS:1000068") //septum { instruments_[current_id_].getIonSources().back().setInletType(IonSource::SEPTUM); } else if (accession == "MS:1000069") //thermospray inlet { instruments_[current_id_].getIonSources().back().setInletType(IonSource::THERMOSPRAYINLET); } else if (accession == "MS:1000248") //direct insertion probe { instruments_[current_id_].getIonSources().back().setInletType(IonSource::BATCH); } else if (accession == "MS:1000249") //direct liquid introduction { instruments_[current_id_].getIonSources().back().setInletType(IonSource::CHROMATOGRAPHY); } else if 
(accession == "MS:1000396") //membrane inlet { instruments_[current_id_].getIonSources().back().setInletType(IonSource::MEMBRANE); } else if (accession == "MS:1000485") //nanospray inlet { instruments_[current_id_].getIonSources().back().setInletType(IonSource::NANOSPRAY); } //ionization type else if (accession == "MS:1000071") //chemical ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::CI); } else if (accession == "MS:1000073") //electrospray ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::ESI); } else if (accession == "MS:1000074") //fast atom bombardment ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::FAB); } else if (accession == "MS:1000227") //multiphoton ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::MPI); } else if (accession == "MS:1000240") //atmospheric pressure ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::API); } else if (accession == "MS:1000247") //desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::DI); } else if (accession == "MS:1000255") //flowing afterglow { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::FA); } else if (accession == "MS:1000258") //field ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::FII); } else if (accession == "MS:1000259") //glow discharge ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::GD_MS); } else if (accession == "MS:1000271") //Negative ion chemical ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::NICI); } else if (accession == "MS:1000272") //neutralization reionization mass spectrometry { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::NRMS); } 
else if (accession == "MS:1000273") //photoionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::PI); } else if (accession == "MS:1000274") //pyrolysis mass spectrometry { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::PYMS); } else if (accession == "MS:1000276") //resonance enhanced multiphoton ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::REMPI); } else if (accession == "MS:1000380") //adiabatic ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::AI); } else if (accession == "MS:1000381") //associative ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::ASI); } else if (accession == "MS:1000383") //autodetachment { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::AD); } else if (accession == "MS:1000384") //autoionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::AUI); } else if (accession == "MS:1000385") //charge exchange ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::CEI); } else if (accession == "MS:1000386") //chemi-ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::CHEMI); } else if (accession == "MS:1000388") //dissociative ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::DISSI); } else if (accession == "MS:1000389") //electron ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::EI); } else if (accession == "MS:1000395") //liquid secondary ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::LSI); } else if (accession == "MS:1000399") //penning ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::PEI); } else if (accession == "MS:1000400") 
//plasma desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::PD); } else if (accession == "MS:1000402") //secondary ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SI); } else if (accession == "MS:1000403") //soft ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SOI); } else if (accession == "MS:1000404") //spark ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SPI); } else if (accession == "MS:1000406") //surface ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SUI); } else if (accession == "MS:1000407") //thermal ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::TI); } else if (accession == "MS:1000408") //vertical ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::VI); } else if (accession == "MS:1000446") //fast ion bombardment { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::FIB); } else if (accession == "MS:1000070") //atmospheric pressure chemical ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::APCI); } else if (accession == "MS:1000239") //atmospheric pressure matrix-assisted laser desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::AP_MALDI); } else if (accession == "MS:1000382") //atmospheric pressure photoionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::APPI); } else if (accession == "MS:1000075") //matrix-assisted laser desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::MALDI); } else if (accession == "MS:1000257") //field desorption { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::FD); } 
else if (accession == "MS:1000387") //desorption/ionization on silicon { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SILI); } else if (accession == "MS:1000393") //laser desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::LD); } else if (accession == "MS:1000405") //surface-assisted laser desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SALDI); } else if (accession == "MS:1000397") //microelectrospray { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::MESI); } else if (accession == "MS:1000398") //nanoelectrospray { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::NESI); } else if (accession == "MS:1000278") //surface enhanced laser desorption ionization { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SELDI); } else if (accession == "MS:1000279") //surface enhanced neat desorption { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::SEND); } else if (accession == "MS:1000008") //ionization type (base term) { instruments_[current_id_].getIonSources().back().setIonizationMethod(IonSource::IONMETHODNULL); } //source attribute else if (accession == "MS:1000392") //ionization efficiency { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("ionization efficiency", termValue); } else if (accession == "MS:1000486") //source potential { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("source potential", termValue); } else if (accession == "MS:1000875") // declustering potential { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("declustering potential", termValue); } else if (accession == "MS:1000876") // cone voltage { //No member => meta data 
instruments_[current_id_].getIonSources().back().setMetaValue("cone voltage", termValue); } else if (accession == "MS:1000877") // tube lens { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("tube lens", termValue); } //laser attribute else if (accession == "MS:1000843") // wavelength { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("wavelength", termValue); } else if (accession == "MS:1000844") // focus diameter x { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("focus diameter x", termValue); } else if (accession == "MS:1000845") // focus diameter y { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("focus diameter y", termValue); } else if (accession == "MS:1000846") // pulse energy { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("pulse energy", termValue); } else if (accession == "MS:1000847") // pulse duration { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("pulse duration", termValue); } else if (accession == "MS:1000848") // attenuation { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("attenuation", termValue); } else if (accession == "MS:1000849") // impact angle { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("impact angle", termValue); } //laser type else if (accession == "MS:1000850") // gas laser { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("laser type", "gas laser"); } else if (accession == "MS:1000851") // solid-state laser { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("laser type", "solid-state laser"); } else if (accession == "MS:1000852") // dye-laser { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("laser 
type", "dye-laser"); } else if (accession == "MS:1000853") // free electron laser { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("laser type", "free electron laser"); } //MALDI matrix application else if (accession == "MS:1000834") // matrix solution { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("matrix solution", termValue); } else if (accession == "MS:1000835") // matrix solution concentration { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("matrix solution concentration", termValue); } // matrix application type else if (accession == "MS:1000836") // dried dropplet { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("matrix application type", "dried dropplet"); } else if (accession == "MS:1000837") // printed { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("matrix application type", "printed"); } else if (accession == "MS:1000838") // sprayed { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("matrix application type", "sprayed"); } else if (accession == "MS:1000839") // precoated plate { //No member => meta data instruments_[current_id_].getIonSources().back().setMetaValue("matrix application type", " precoated plate"); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "analyzer") { //mass analyzer type if (accession == "MS:1000079") //fourier transform ion cyclotron resonance mass spectrometer { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::FOURIERTRANSFORM); } else if (accession == "MS:1000080") //magnetic sector { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::SECTOR); } else if (accession == "MS:1000081") //quadrupole { 
instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::QUADRUPOLE); } else if (accession == "MS:1000084") //time-of-flight { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::TOF); } else if (accession == "MS:1000254") //electrostatic energy analyzer { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::ESA); } else if (accession == "MS:1000264") //ion trap { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::IT); } else if (accession == "MS:1000284") //stored waveform inverse fourier transform { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::SWIFT); } else if (accession == "MS:1000288") //cyclotron { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::CYCLOTRON); } else if (accession == "MS:1000484") //orbitrap { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::ORBITRAP); } else if (accession == "MS:1000078") //axial ejection linear ion trap { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::AXIALEJECTIONLINEARIONTRAP); } else if (accession == "MS:1000082") //quadrupole ion trap { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::PAULIONTRAP); } else if (accession == "MS:1000083") //radial ejection linear ion trap { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::RADIALEJECTIONLINEARIONTRAP); } else if (accession == "MS:1000291") //linear ion trap { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::LIT); } else if (accession == "MS:1000443") //mass analyzer type (base term) { instruments_[current_id_].getMassAnalyzers().back().setType(MassAnalyzer::ANALYZERNULL); } //mass analyzer attribute else if (accession == "MS:1000014") //accuracy (ppm) { instruments_[current_id_].getMassAnalyzers().back().setAccuracy(value.toDouble()); } else if (accession == "MS:1000022") //TOF Total Path Length (meter) { 
instruments_[current_id_].getMassAnalyzers().back().setTOFTotalPathLength(value.toDouble()); } else if (accession == "MS:1000024") //final MS exponent { instruments_[current_id_].getMassAnalyzers().back().setFinalMSExponent(value.toInt()); } else if (accession == "MS:1000025") //magnetic field strength (tesla) { instruments_[current_id_].getMassAnalyzers().back().setMagneticFieldStrength(value.toDouble()); } else if (accession == "MS:1000105") //reflectron off { instruments_[current_id_].getMassAnalyzers().back().setReflectronState(MassAnalyzer::OFF); } else if (accession == "MS:1000106") //reflectron on { instruments_[current_id_].getMassAnalyzers().back().setReflectronState(MassAnalyzer::ON); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "detector") { //detector type if (accession == "MS:1000107") //channeltron { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::CHANNELTRON); } else if (accession == "MS:1000110") //daly detector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::DALYDETECTOR); } else if (accession == "MS:1000112") //faraday cup { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::FARADAYCUP); } else if (accession == "MS:1000114") //microchannel plate detector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::MICROCHANNELPLATEDETECTOR); } else if (accession == "MS:1000115") //multi-collector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::MULTICOLLECTOR); } else if (accession == "MS:1000116") //photomultiplier { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::PHOTOMULTIPLIER); } else if (accession == "MS:1000253") //electron multiplier { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::ELECTRONMULTIPLIER); } else if (accession == "MS:1000345") //array detector { 
instruments_[current_id_].getIonDetectors().back().setType(IonDetector::ARRAYDETECTOR); } else if (accession == "MS:1000346") //conversion dynode { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::CONVERSIONDYNODE); } else if (accession == "MS:1000347") //dynode { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::DYNODE); } else if (accession == "MS:1000348") //focal plane collector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::FOCALPLANECOLLECTOR); } else if (accession == "MS:1000349") //ion-to-photon detector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::IONTOPHOTONDETECTOR); } else if (accession == "MS:1000350") //point collector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::POINTCOLLECTOR); } else if (accession == "MS:1000351") //postacceleration detector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::POSTACCELERATIONDETECTOR); } else if (accession == "MS:1000621") //photodiode array detector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::PHOTODIODEARRAYDETECTOR); } else if (accession == "MS:1000624") //inductive detector { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::INDUCTIVEDETECTOR); } else if (accession == "MS:1000108") //conversion dynode electron multiplier { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::CONVERSIONDYNODEELECTRONMULTIPLIER); } else if (accession == "MS:1000109") //conversion dynode photomultiplier { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::CONVERSIONDYNODEPHOTOMULTIPLIER); } else if (accession == "MS:1000111") //electron multiplier tube { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::ELECTRONMULTIPLIERTUBE); } else if (accession == "MS:1000113") //focal plane array { 
instruments_[current_id_].getIonDetectors().back().setType(IonDetector::FOCALPLANEARRAY); } else if (accession == "MS:1000026") //detector type (base term) { instruments_[current_id_].getIonDetectors().back().setType(IonDetector::TYPENULL); } //detector attribute else if (accession == "MS:1000028") //detector resolution { instruments_[current_id_].getIonDetectors().back().setResolution(value.toDouble()); } else if (accession == "MS:1000029") //sampling frequency { instruments_[current_id_].getIonDetectors().back().setADCSamplingFrequency(value.toDouble()); } //detector acquisition mode else if (accession == "MS:1000117") //analog-digital converter { instruments_[current_id_].getIonDetectors().back().setAcquisitionMode(IonDetector::ADC); } else if (accession == "MS:1000118") //pulse counting { instruments_[current_id_].getIonDetectors().back().setAcquisitionMode(IonDetector::PULSECOUNTING); } else if (accession == "MS:1000119") //time-digital converter { instruments_[current_id_].getIonDetectors().back().setAcquisitionMode(IonDetector::TDC); } else if (accession == "MS:1000120") //transient recorder { instruments_[current_id_].getIonDetectors().back().setAcquisitionMode(IonDetector::TRANSIENTRECORDER); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "processingMethod") { //data processing parameter if (accession == "MS:1000629") //low intensity threshold (ion count) { processing_[current_id_].back().setMetaValue("low_intensity_threshold", termValue); } else if (accession == "MS:1000631") //high intensity threshold (ion count) { processing_[current_id_].back().setMetaValue("high_intensity_threshold", termValue); } else if (accession == "MS:1000787") //inclusive low intensity threshold { processing_[current_id_].back().setMetaValue("inclusive_low_intensity_threshold", termValue); } else if (accession == "MS:1000788") //inclusive high intensity threshold { 
processing_[current_id_].back().setMetaValue("inclusive_high_intensity_threshold", termValue); } else if (accession == "MS:1000747") //completion time { processing_[current_id_].back().setCompletionTime(asDateTime_(value)); } //file format conversion else if (accession == "MS:1000530") //file format conversion { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::FORMAT_CONVERSION); } else if (accession == "MS:1000544") //Conversion to mzML { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CONVERSION_MZML); } else if (accession == "MS:1000545") //Conversion to mzXML { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CONVERSION_MZXML); } else if (accession == "MS:1000546") //Conversion to mzData { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CONVERSION_MZDATA); } else if (accession == "MS:1000741") //Conversion to DTA { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CONVERSION_DTA); } //data processing action else if (accession == "MS:1000543") //data processing action { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::DATA_PROCESSING); } else if (accession == "MS:1000033") //deisotoping { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::DEISOTOPING); } else if (accession == "MS:1000034") //charge deconvolution { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CHARGE_DECONVOLUTION); } else if (accession == "MS:1000035" || cv_.isChildOf(accession, "MS:1000035")) //peak picking (or child terms, we make no difference) { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::PEAK_PICKING); } else if (accession == "MS:1000592" || cv_.isChildOf(accession, "MS:1000592")) //smoothing (or child terms, we make no difference) { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::SMOOTHING); } 
else if (accession == "MS:1000778" || cv_.isChildOf(accession, "MS:1000778")) //charge state calculation (or child terms, we make no difference) { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CHARGE_CALCULATION); } else if (accession == "MS:1000780" || cv_.isChildOf(accession, "MS:1000780")) //precursor recalculation (or child terms, we make no difference) { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::PRECURSOR_RECALCULATION); } else if (accession == "MS:1000593") //baseline reduction { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::BASELINE_REDUCTION); } else if (accession == "MS:1000745") //retention time alignment { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::ALIGNMENT); } else if (accession == "MS:1001484") //intensity normalization { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::NORMALIZATION); } else if (accession == "MS:1001485") //m/z calibration { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::CALIBRATION); } else if (accession == "MS:1001486" || cv_.isChildOf(accession, "MS:1001486")) //data filtering (or child terms, we make no difference) { processing_[current_id_].back().getProcessingActions().insert(DataProcessing::FILTERING); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "fileContent") { if (cv_.isChildOf(accession, "MS:1000524")) //data file content { //ignored //exp_->setMetaValue(name, termValue); } else if (cv_.isChildOf(accession, "MS:1000525")) //spectrum representation { //ignored //exp_->setMetaValue(name, termValue); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "software") { if (cv_.isChildOf(accession, "MS:1000531")) //software as string { if (accession == "MS:1000799") //custom unreleased 
software tool => use value as name { software_[current_id_].setName(value); } else //use name as name { software_[current_id_].setName(name); } } else { warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } //~ software_[current_id_].addCVTerm( CVTerm (accession, value, const String &cv_identifier_ref, const String &value, const Unit &unit) ); TODO somthing like that } else if (parent_tag == "chromatogram") { if (accession == "MS:1000810") { chromatogram_.setChromatogramType(ChromatogramSettings::MASS_CHROMATOGRAM); } else if (accession == "MS:1000235") { chromatogram_.setChromatogramType(ChromatogramSettings::TOTAL_ION_CURRENT_CHROMATOGRAM); } else if (accession == "MS:1000627") { chromatogram_.setChromatogramType(ChromatogramSettings::SELECTED_ION_CURRENT_CHROMATOGRAM); } else if (accession == "MS:1000628") { chromatogram_.setChromatogramType(ChromatogramSettings::BASEPEAK_CHROMATOGRAM); } else if (accession == "MS:1001472") { chromatogram_.setChromatogramType(ChromatogramSettings::SELECTED_ION_MONITORING_CHROMATOGRAM); } else if (accession == "MS:1001473") { chromatogram_.setChromatogramType(ChromatogramSettings::SELECTED_REACTION_MONITORING_CHROMATOGRAM); } else if (accession == "MS:1001474") { chromatogram_.setChromatogramType(ChromatogramSettings::SELECTED_REACTION_MONITORING_CHROMATOGRAM); } else if (accession == "MS:1000811") { chromatogram_.setChromatogramType(ChromatogramSettings::ELECTROMAGNETIC_RADIATION_CHROMATOGRAM); } else if (accession == "MS:1000812") { chromatogram_.setChromatogramType(ChromatogramSettings::ABSORPTION_CHROMATOGRAM); } else if (accession == "MS:1000813") { chromatogram_.setChromatogramType(ChromatogramSettings::EMISSION_CHROMATOGRAM); } else if (accession == "MS:1000809") { chromatogram_.setName(value); } else warning(LOAD, String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'."); } else if (parent_tag == "target") { //allowed but, not needed } else warning(LOAD, 
String("Unhandled cvParam '") + accession + "' in tag '" + parent_tag + "'.");
    }

    /**
      @brief Stores a mzML &lt;userParam&gt; on the object that is currently being parsed.

      The string @p value is first converted to a DataValue whose type is derived
      from the XML schema type @p type (xsd float types -> double, xsd integer
      types -> int, everything else -> string). The value is then attached as meta
      data to the object corresponding to @p parent_tag (run, instrument parts,
      sample, software, spectrum, chromatogram, ...). @p parent_parent_tag is only
      needed to disambiguate &lt;isolationWindow&gt;, which occurs under both
      &lt;precursor&gt; and &lt;product&gt;. Unknown parent tags trigger a LOAD warning.
    */
    template <typename MapType>
    void MzMLHandler<MapType>::handleUserParam_(const String& parent_parent_tag, const String& parent_tag, const String& name, const String& type, const String& value)
    {
      //create a DataValue that contains the data in the right type
      DataValue data_value;
      //float type
      if (type == "xsd:double" || type == "xsd:float")
      {
        data_value = DataValue(value.toDouble());
      }
      //integer type
      else if (type == "xsd:byte" || type == "xsd:decimal" || type == "xsd:int" || type == "xsd:integer" || type == "xsd:long" || type == "xsd:negativeInteger" || type == "xsd:nonNegativeInteger" || type == "xsd:nonPositiveInteger" || type == "xsd:positiveInteger" || type == "xsd:short" || type == "xsd:unsignedByte" || type == "xsd:unsignedInt" || type == "xsd:unsignedLong" || type == "xsd:unsignedShort")
      {
        data_value = DataValue(value.toInt());
      }
      //everything else is treated as a string
      else
      {
        data_value = DataValue(value);
      }

      //find the right MetaInfoInterface to attach the value to
      if (parent_tag == "run")
      {
        exp_->setMetaValue(name, data_value);
      }
      else if (parent_tag == "instrumentConfiguration")
      {
        instruments_[current_id_].setMetaValue(name, data_value);
      }
      else if (parent_tag == "source")
      {
        instruments_[current_id_].getIonSources().back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "analyzer")
      {
        instruments_[current_id_].getMassAnalyzers().back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "detector")
      {
        instruments_[current_id_].getIonDetectors().back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "sample")
      {
        samples_[current_id_].setMetaValue(name, data_value);
      }
      else if (parent_tag == "software")
      {
        software_[current_id_].setMetaValue(name, data_value);
      }
      else if (parent_tag == "contact")
      {
        exp_->getContacts().back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "sourceFile")
      {
        source_files_[current_id_].setMetaValue(name, data_value);
      }
      else if (parent_tag == "binaryDataArray")
      {
        data_.back().meta.setMetaValue(name, data_value);
      }
      else if (parent_tag == "spectrum")
      {
        spec_.setMetaValue(name, data_value);
      }
      else if (parent_tag == "chromatogram")
      {
        chromatogram_.setMetaValue(name, data_value);
      }
      else if (parent_tag == "scanList")
      {
        spec_.getAcquisitionInfo().setMetaValue(name, data_value);
      }
      else if (parent_tag == "scan")
      {
        spec_.getAcquisitionInfo().back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "scanWindow")
      {
        spec_.getInstrumentSettings().getScanWindows().back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "isolationWindow")
      {
        //We don't have this as a separate location => store it in the precursor (or product)
        if (parent_parent_tag == "precursor")
        {
          if (in_spectrum_list_)
          {
            spec_.getPrecursors().back().setMetaValue(name, data_value);
          }
          else
          {
            chromatogram_.getPrecursor().setMetaValue(name, data_value);
          }
        }
        else if (parent_parent_tag == "product")
        {
          if (in_spectrum_list_)
          {
            spec_.getProducts().back().setMetaValue(name, data_value);
          }
          else
          {
            chromatogram_.getProduct().setMetaValue(name, data_value);
          }
        }
      }
      else if (parent_tag == "selectedIon")
      {
        //parse only the first selected ion
        if (selected_ion_count_ > 1)
          return;

        //We don't have this as a separate location => store it in the precursor
        if (in_spectrum_list_)
        {
          spec_.getPrecursors().back().setMetaValue(name, data_value);
        }
        else
        {
          chromatogram_.getPrecursor().setMetaValue(name, data_value);
        }
      }
      else if (parent_tag == "activation")
      {
        //We don't have this as a separate location => store it in the precursor
        if (in_spectrum_list_)
        {
          spec_.getPrecursors().back().setMetaValue(name, data_value);
        }
        else
        {
          chromatogram_.getPrecursor().setMetaValue(name, data_value);
        }
      }
      else if (parent_tag == "processingMethod")
      {
        processing_[current_id_].back().setMetaValue(name, data_value);
      }
      else if (parent_tag == "fileContent")
      {
        //deliberately ignored:
        //exp_->setMetaValue(name, data_value);
      }
      else
        warning(LOAD, String("Unhandled userParam '") + name + "' in tag '" + parent_tag + "'.");
    }

    template
<typename MapType>
    bool MzMLHandler<MapType>::validateCV_(const ControlledVocabulary::CVTerm& c, const String& path, const Internal::MzMLValidator& validator) const
    {
      //Wrap the CV term in the SemanticValidator's representation (units are
      //not checked here) and ask the validator whether the term is allowed at
      //the given mapping path.
      SemanticValidator::CVTerm sc;
      sc.accession = c.id;
      sc.name = c.name;
      sc.has_unit_accession = false;
      sc.has_unit_name = false;

      return validator.SemanticValidator::locateTerm(path, sc);
    }

    /**
      @brief Renders a CV term (plus optional value and unit) as a '&lt;cvParam .../&gt;' XML line.

      The cvRef is taken from the accession prefix (the part before ':').
      If @p metaValue is non-empty, it is written as the XML-escaped 'value'
      attribute; if it additionally carries a unit, the unit term is looked up
      in the CV and written as unitAccession/unitName/unitCvRef.
      The returned string ends with a newline.
    */
    template <typename MapType>
    String MzMLHandler<MapType>::writeCV_(const ControlledVocabulary::CVTerm& c, const DataValue& metaValue) const
    {
      String cvTerm = "<cvParam cvRef=\"" + c.id.prefix(':') + "\" accession=\"" + c.id + "\" name=\"" + c.name;
      if (!metaValue.isEmpty())
      {
        cvTerm += "\" value=\"" + writeXMLEscape(metaValue.toString());
        if (metaValue.hasUnit())
        {
          // unitAccession="UO:0000021" unitName="gram" unitCvRef="UO"
          ControlledVocabulary::CVTerm unit = cv_.getTerm(metaValue.getUnit());
          cvTerm += "\" unitAccession=\"" + unit.id + "\" unitName=\"" + unit.name + "\" unitCvRef=\"" + unit.id.prefix(2);
        }
      }
      cvTerm += "\"/>\n";
      return cvTerm;
    }

    /**
      @brief Writes all meta values of @p meta as &lt;cvParam&gt;/&lt;userParam&gt; elements to @p os.

      Keys whose name matches a CV term that validates at @p path are written as
      cvParams; everything else becomes a userParam typed via the DataValue type.
      GO/BTO keys receive special treatment (the CV term name is stored in the
      value). All cvParams are written before all userParams; every line is
      indented with @p indent tab characters.
    */
    template <typename MapType>
    void MzMLHandler<MapType>::writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, UInt indent, String path, Internal::MzMLValidator& validator) const
    {
      std::vector<String> cvParams;
      std::vector<String> userParams;

      std::vector<String> keys;
      meta.getKeys(keys);

      for (std::vector<String>::iterator key = keys.begin(); key != keys.end(); ++key)
      {
        // special treatment of GO and BTO terms
        // <cvParam cvRef="BTO" accession="BTO:0000199" name="cardiac muscle"/>
        if (*key == "GO cellular component" || *key == "brenda source tissue")
        {
          // the CVTerm info is in the value
          const DataValue& metaValue = meta.getMetaValue(*key);
          if (cv_.hasTermWithName((String) metaValue))
          {
            ControlledVocabulary::CVTerm c = cv_.getTermByName((String) metaValue);
            // TODO: validate CV, we currently cannot do this as the relations in the BTO and GO are not captured by our CV impl
            cvParams.push_back(writeCV_(c, DataValue::EMPTY));
          }
        }
        else
        {
          bool writtenAsCVTerm = false;
          if
(cv_.hasTermWithName(*key))
          {
            ControlledVocabulary::CVTerm c = cv_.getTermByName(*key);
            // term exists in cv_ => write a cvParam, otherwise fall back to a userParam below
            if (validateCV_(c, path, validator))
            {
              // write CV
              cvParams.push_back(writeCV_(c, meta.getMetaValue(*key)));
              writtenAsCVTerm = true;
            }
          }
          // if we could not write it as CVTerm we will store it at least as userParam
          if (!writtenAsCVTerm)
          {
            String userParam = "<userParam name=\"" + *key + "\" type=\"";
            const DataValue& d = meta.getMetaValue(*key);
            //determine the xsd type attribute from the DataValue type
            if (d.valueType() == DataValue::INT_VALUE)
            {
              userParam += "xsd:integer";
            }
            else if (d.valueType() == DataValue::DOUBLE_VALUE)
            {
              userParam += "xsd:double";
            }
            else //string or lists are converted to string
            {
              userParam += "xsd:string";
            }
            userParam += "\" value=\"" + writeXMLEscape(d.toString()) + "\"/>" + "\n";
            userParams.push_back(userParam);
          }
        }
      }

      // write out all the cvParams and userParams in correct order (cvParams first)
      for (std::vector<String>::iterator term = cvParams.begin(); term != cvParams.end(); ++term)
      {
        os << String(indent, '\t') << *term;
      }
      for (std::vector<String>::iterator term = userParams.begin(); term != userParams.end(); ++term)
      {
        os << String(indent, '\t') << *term;
      }
    }

    /**
      @brief Looks up the child term of @p parent_accession whose name equals @p name.

      Searches all (transitive) child terms of the given parent term in the CV.
      @return the matching CVTerm, or a default-constructed (empty) CVTerm if no
              child with that name exists.
    */
    template <typename MapType>
    ControlledVocabulary::CVTerm MzMLHandler<MapType>::getChildWithName_(const String& parent_accession, const String& name) const
    {
      std::set<String> terms;
      cv_.getAllChildTerms(terms, parent_accession);
      for (std::set<String>::const_iterator it = terms.begin(); it != terms.end(); ++it)
      {
        if (cv_.getTerm(*it).name == name)
        {
          return cv_.getTerm(*it);
        }
      }
      return ControlledVocabulary::CVTerm();
    }

    /**
      @brief Writes a &lt;software&gt; element (including its CV term) to @p os.

      Tries to map the software name onto a child term of MS:1000531 ('software'),
      with two fallback name variants for compatibility with older CV versions;
      if no term is found, 'custom unreleased software tool' (MS:1000799) is
      written with the software name as value.
    */
    template <typename MapType>
    void MzMLHandler<MapType>::writeSoftware_(std::ostream& os, const String& id, const Software& software, Internal::MzMLValidator& validator)
    {
      os << "\t\t<software id=\"" << id << "\" version=\"" << software.getVersion() << "\" >\n";
      ControlledVocabulary::CVTerm so_term = getChildWithName_("MS:1000531", software.getName());
      if (so_term.id == "")
      {
        so_term =
getChildWithName_("MS:1000531", software.getName() + " software"); //act of desperation to find the right cv and keep compatible with older cv mzmls
      }
      if (so_term.id == "")
      {
        so_term = getChildWithName_("MS:1000531", "TOPP " + software.getName()); //act of desperation to find the right cv and keep compatible with older cv mzmls
      }
      if (so_term.id == "MS:1000799")
      {
        os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000799\" name=\"custom unreleased software tool\" value=\"\" />\n";
      }
      else if (so_term.id != "")
      {
        os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"" << so_term.id << "\" name=\"" << writeXMLEscape(so_term.name) << "\" />\n";
      }
      else
      {
        //fallback: record the unknown software name as the value of MS:1000799
        os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000799\" name=\"custom unreleased software tool\" value=\"" << writeXMLEscape(software.getName()) << "\" />\n";
      }
      writeUserParam_(os, software, 3, "/mzML/Software/cvParam/@accession", validator);
      os << "\t\t</software>\n";
    }

    /**
      @brief Writes a &lt;sourceFile&gt; element to @p os.

      Writes the checksum (SHA-1/MD5), file type and native ID format as
      cvParams. Where the stored information cannot be mapped onto a CV term, a
      placeholder term is written so the output stays schema-valid (see the
      'FORCED' comments).
    */
    template <typename MapType>
    void MzMLHandler<MapType>::writeSourceFile_(std::ostream& os, const String& id, const SourceFile& source_file, Internal::MzMLValidator& validator)
    {
      os << "\t\t\t<sourceFile id=\"" << id << "\" name=\"" << writeXMLEscape(source_file.getNameOfFile()) << "\" location=\"" << writeXMLEscape(source_file.getPathToFile()) << "\">\n";
      //checksum
      if (source_file.getChecksumType() == SourceFile::SHA1)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000569\" name=\"SHA-1\" value=\"" << source_file.getChecksum() << "\" />\n";
      }
      else if (source_file.getChecksumType() == SourceFile::MD5)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000568\" name=\"MD5\" value=\"" << source_file.getChecksum() << "\" />\n";
      }
      else //FORCED
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000569\" name=\"SHA-1\" value=\"\" />\n";
      }
      //file type
      ControlledVocabulary::CVTerm ft_term = getChildWithName_("MS:1000560", source_file.getFileType());
      if (ft_term.id.empty() && source_file.getFileType().hasSuffix("file"))
      {
        ft_term =
getChildWithName_("MS:1000560", source_file.getFileType().chop(4) + "format"); // this is born out of desperation that sourcefile has a string interface for its filetype and not the enum, which could have been easily manipulated to the updated cv } if (ft_term.id != "") { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"" << ft_term.id << "\" name=\"" << ft_term.name << "\" />\n"; } else //FORCED { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000564\" name=\"PSI mzData format\" />\n"; } //native ID format ControlledVocabulary::CVTerm id_term = getChildWithName_("MS:1000767", source_file.getNativeIDType()); if (id_term.id != "") { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"" << id_term.id << "\" name=\"" << id_term.name << "\" />\n"; } else //FORCED { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000777\" name=\"spectrum identifier nativeID format\" />\n"; } writeUserParam_(os, source_file, 4, "/mzML/fileDescription/sourceFileList/sourceFile/cvParam/@accession", validator); os << "\t\t\t</sourceFile>\n"; } template <typename MapType> void MzMLHandler<MapType>::writeDataProcessing_(std::ostream& os, const String& id, const std::vector<DataProcessing>& dps, Internal::MzMLValidator& validator) { os << "\t\t<dataProcessing id=\"" << id << "\">\n"; //FORCED if (dps.empty()) { os << "\t\t\t<processingMethod order=\"0\" softwareRef=\"so_default\">\n"; os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000544\" name=\"Conversion to mzML\" />\n"; os << "\t\t\t\t<userParam name=\"warning\" type=\"xsd:string\" value=\"fictional processing method used to fulfill format requirements\" />\n"; os << "\t\t\t</processingMethod>\n"; } bool written = false; for (Size i = 0; i < dps.size(); ++i) { //data processing action os << "\t\t\t<processingMethod order=\"0\" softwareRef=\"so_" << id << "_pm_" << i << "\">\n"; if (dps[i].getProcessingActions().count(DataProcessing::DATA_PROCESSING) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000543\" 
name=\"data processing action\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CHARGE_DECONVOLUTION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000034\" name=\"charge deconvolution\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::DEISOTOPING) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000033\" name=\"deisotoping\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::SMOOTHING) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000592\" name=\"smoothing\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CHARGE_CALCULATION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000778\" name=\"charge state calculation\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::PRECURSOR_RECALCULATION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000780\" name=\"precursor recalculation\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::BASELINE_REDUCTION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000593\" name=\"baseline reduction\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::PEAK_PICKING) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000035\" name=\"peak picking\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::ALIGNMENT) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000745\" name=\"retention time alignment\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CALIBRATION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001485\" name=\"m/z calibration\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::NORMALIZATION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001484\" name=\"intensity normalization\" />\n"; written 
= true; } if (dps[i].getProcessingActions().count(DataProcessing::FILTERING) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001486\" name=\"data filtering\" />\n"; written = true; } //file format conversion if (dps[i].getProcessingActions().count(DataProcessing::FORMAT_CONVERSION) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000530\" name=\"file format conversion\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CONVERSION_MZDATA) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000546\" name=\"Conversion to mzData\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CONVERSION_MZML) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000544\" name=\"Conversion to mzML\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CONVERSION_MZXML) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000545\" name=\"Conversion to mzXML\" />\n"; written = true; } if (dps[i].getProcessingActions().count(DataProcessing::CONVERSION_DTA) == 1) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000741\" name=\"Conversion to dta\" />\n"; written = true; } if (!written) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000543\" name=\"data processing action\" />\n"; } //data processing attribute if (dps[i].getCompletionTime().isValid()) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000747\" name=\"completion time\" value=\"" << dps[i].getCompletionTime().toString("yyyy-MM-dd+hh:mm").toStdString() << "\" />\n"; } writeUserParam_(os, dps[i], 4, "/mzML/dataProcessingList/dataProcessing/processingMethod/cvParam/@accession", validator); os << "\t\t\t</processingMethod>\n"; } os << "\t\t</dataProcessing>\n"; } template <typename MapType> void MzMLHandler<MapType>::writePrecursor_(std::ostream& os, const Precursor& precursor, Internal::MzMLValidator& validator) { os << "\t\t\t\t\t<precursor>\n"; 
  // (continuation of writePrecursor_ — opened on the preceding line with "<precursor>")
  //--------------------------------------------------------------------------------------------
  // isolation window: target m/z plus lower/upper offsets, all in m/z units (MS:1000040)
  //--------------------------------------------------------------------------------------------
  os << "\t\t\t\t\t\t<isolationWindow>\n";
  os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"" << precursor.getMZ() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000828\" name=\"isolation window lower offset\" value=\"" << precursor.getIsolationWindowLowerOffset() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000829\" name=\"isolation window upper offset\" value=\"" << precursor.getIsolationWindowUpperOffset() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  os << "\t\t\t\t\t\t</isolationWindow>\n";
  //userParam: no extra object for it => no user parameters
  //--------------------------------------------------------------------------------------------
  // selected ion list: exactly one selectedIon is written (count is hard-coded to 1)
  //--------------------------------------------------------------------------------------------
  os << "\t\t\t\t\t\t<selectedIonList count=\"1\">\n";
  os << "\t\t\t\t\t\t\t<selectedIon>\n";
  os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000744\" name=\"selected ion m/z\" value=\"" << precursor.getMZ() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000041\" name=\"charge state\" value=\"" << precursor.getCharge() << "\" />\n";
  os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000042\" name=\"peak intensity\" value=\"" << precursor.getIntensity() << "\" unitAccession=\"MS:1000132\" unitName=\"percent of base peak\" unitCvRef=\"MS\" />\n";
  // one cvParam per alternative charge state stored on the precursor
  for (Size j = 0; j < precursor.getPossibleChargeStates().size(); ++j)
  {
    os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000633\" name=\"possible charge state\" value=\"" << precursor.getPossibleChargeStates()[j] << "\" />\n";
  }
  //userParam: no extra object for it => no user parameters
  os << "\t\t\t\t\t\t\t</selectedIon>\n";
  os << "\t\t\t\t\t\t</selectedIonList>\n";
  //--------------------------------------------------------------------------------------------
  // activation: energy (only when non-zero) plus one cvParam per activation method
  //--------------------------------------------------------------------------------------------
  os << "\t\t\t\t\t\t<activation>\n";
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
  // exact float comparison is intentional: 0 is the "unset" sentinel for activation energy
  if (precursor.getActivationEnergy() != 0)
#pragma clang diagnostic pop
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000509\" name=\"activation energy\" value=\"" << precursor.getActivationEnergy() << "\" unitAccession=\"UO:0000266\" unitName=\"electronvolt\" unitCvRef=\"UO\" />\n";
  }
  // map each OpenMS activation method enum to its PSI-MS CV accession
  if (precursor.getActivationMethods().count(Precursor::CID) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000133\" name=\"collision-induced dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::PD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000134\" name=\"plasma desorption\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::PSD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000135\" name=\"post-source decay\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::SID) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000136\" name=\"surface-induced dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::BIRD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000242\" name=\"blackbody infrared radiative dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::ECD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000250\" name=\"electron capture dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::IMD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000262\" name=\"infrared multiphoton dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::SORI) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000282\" name=\"sustained off-resonance irradiation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::HCID) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000422\" name=\"high-energy collision-induced dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::LCID) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000433\" name=\"low-energy collision-induced dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::PHD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000435\" name=\"photodissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::ETD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000598\" name=\"electron transfer dissociation\" />\n";
  }
  if (precursor.getActivationMethods().count(Precursor::PQD) != 0)
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000599\" name=\"pulsed q dissociation\" />\n";
  }
  // fall back to the generic parent term when no method is set (keeps the output schema-valid)
  if (precursor.getActivationMethods().empty())
  {
    os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000044\" name=\"dissociation method\" />\n";
  }
  //as "precursor" has no own user param its userParam is stored here
  writeUserParam_(os, precursor, 6, "/mzML/run/spectrumList/spectrum/precursorList/precursor/activation/cvParam/@accession", validator);
  os << "\t\t\t\t\t\t</activation>\n";
  os << "\t\t\t\t\t</precursor>\n";
}

/// Writes a single <product> element: only an isolationWindow (target m/z and
/// lower/upper offsets) plus the product's userParams; products carry no
/// selected-ion or activation information.
template <typename MapType>
void MzMLHandler<MapType>::writeProduct_(std::ostream& os, const Product& product, Internal::MzMLValidator& validator)
{
  os << "\t\t\t\t\t<product>\n";
  os << "\t\t\t\t\t\t<isolationWindow>\n";
  os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000827\" name=\"isolation window target m/z\" value=\"" << product.getMZ() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000828\" name=\"isolation window lower offset\" value=\"" << product.getIsolationWindowLowerOffset() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  os << "\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000829\" name=\"isolation window upper offset\" value=\"" << product.getIsolationWindowUpperOffset() << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
  writeUserParam_(os, product, 7, "/mzML/run/spectrumList/spectrum/productList/product/isolationWindow/cvParam/@accession", validator);
  os << "\t\t\t\t\t\t</isolationWindow>\n";
  os << "\t\t\t\t\t</product>\n";
}

/// Serializes the whole experiment (cexp_) as mzML to the given stream:
/// header, spectrumList and chromatogramList, with progress logging.
template <typename MapType>
void MzMLHandler<MapType>::writeTo(std::ostream& os)
{
  const MapType& exp = *(cexp_);
  // one progress tick per spectrum plus one per chromatogram
  logger_.startProgress(0, exp.size() + exp.getChromatograms().size(), "storing mzML file");
  int progress = 0;
  Internal::MzMLValidator validator(mapping_, cv_);
  // filled by writeHeader_; reused below when writing spectra
  std::vector<std::vector<DataProcessing> > dps;
  //--------------------------------------------------------------------------------------------
  //header
  //--------------------------------------------------------------------------------------------
  writeHeader_(os, exp, dps, validator);
  //--------------------------------------------------------------------------------------------
  //spectrum
  //--------------------------------------------------------------------------------------------
  if (exp.size() != 0)
  {
    // INFO : do not try to be smart and skip empty spectra or
    // chromatograms. There can be very good reasons for this (e.g. if the
    // meta information needs to be stored here but the actual data is
    // stored somewhere else).
    // (inside writeTo, within "if (exp.size() != 0)")
    // NOTE(review): "dp_sp_0" presumably refers to the first dataProcessing entry
    // emitted by writeHeader_ — confirm against the header writer.
    os << "\t\t<spectrumList count=\"" << exp.size() << "\" defaultDataProcessingRef=\"dp_sp_0\">\n";
    //check native ids: every native ID must contain '=' (key=value format);
    //if any spectrum lacks it, all IDs are regenerated below
    bool renew_native_ids = false;
    for (Size s = 0; s < exp.size(); ++s)
    {
      if (!exp[s].getNativeID().has('='))
      {
        renew_native_ids = true;
        break;
      }
    }
    //issue warning if something is wrong
    if (renew_native_ids)
    {
      warning(STORE, String("Invalid native IDs detected. Using spectrum identifier nativeID format (spectrum=xsd:nonNegativeInteger) for all spectra."));
    }
    //write actual data, advancing the progress logger once per spectrum
    for (Size s = 0; s < exp.size(); ++s)
    {
      logger_.setProgress(progress++);
      const SpectrumType& spec = exp[s];
      writeSpectrum_(os, spec, s, validator, renew_native_ids, dps);
    }
    os << "\t\t</spectrumList>\n";
  }

  //--------------------------------------------------------------------------------------------
  //chromatograms
  //--------------------------------------------------------------------------------------------
  if (!exp.getChromatograms().empty())
  {
    // INFO : do not try to be smart and skip empty spectra or
    // chromatograms. There can be very good reasons for this (e.g. if the
    // meta information needs to be stored here but the actual data is
    // stored somewhere else).
os << "\t\t<chromatogramList count=\"" << exp.getChromatograms().size() << "\" defaultDataProcessingRef=\"dp_sp_0\">\n"; for (Size c = 0; c != exp.getChromatograms().size(); ++c) { logger_.setProgress(progress++); const ChromatogramType& chromatogram = exp.getChromatograms()[c]; writeChromatogram_(os, chromatogram, c, validator); } os << "\t\t</chromatogramList>" << "\n"; } MzMLHandlerHelper::writeFooter_(os, options_, spectra_offsets, chromatograms_offsets); logger_.endProgress(); } template <typename MapType> void MzMLHandler<MapType>::writeHeader_(std::ostream& os, const MapType& exp, std::vector<std::vector<DataProcessing> >& dps, Internal::MzMLValidator& validator) { os << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n"; if (options_.getWriteIndex()) { os << "<indexedmzML xmlns=\"http://psi.hupo.org/ms/mzml\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://psi.hupo.org/ms/mzml http://psidev.info/files/ms/mzML/xsd/mzML1.1.0_idx.xsd\">\n"; } os << "<mzML xmlns=\"http://psi.hupo.org/ms/mzml\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://psi.hupo.org/ms/mzml http://psidev.info/files/ms/mzML/xsd/mzML1.1.0.xsd\" accession=\"" << writeXMLEscape(exp.getIdentifier()) << "\" version=\"" << version_ << "\">\n"; //-------------------------------------------------------------------------------------------- // CV list //-------------------------------------------------------------------------------------------- os << "\t<cvList count=\"5\">\n" << "\t\t<cv id=\"MS\" fullName=\"Proteomics Standards Initiative Mass Spectrometry Ontology\" URI=\"http://psidev.cvs.sourceforge.net/*checkout*/psidev/psi/psi-ms/mzML/controlledVocabulary/psi-ms.obo\"/>\n" << "\t\t<cv id=\"UO\" fullName=\"Unit Ontology\" URI=\"http://obo.cvs.sourceforge.net/obo/obo/ontology/phenotype/unit.obo\"/>\n" << "\t\t<cv id=\"BTO\" fullName=\"BrendaTissue545\" version=\"unknown\" 
URI=\"http://www.brenda-enzymes.info/ontology/tissue/tree/update/update_files/BrendaTissueOBO\"/>\n" << "\t\t<cv id=\"GO\" fullName=\"Gene Ontology - Slim Versions\" version=\"unknown\" URI=\"http://www.geneontology.org/GO_slims/goslim_goa.obo\"/>\n" << "\t\t<cv id=\"PATO\" fullName=\"Quality ontology\" version=\"unknown\" URI=\"http://obo.cvs.sourceforge.net/*checkout*/obo/obo/ontology/phenotype/quality.obo\"/>\n" << "\t</cvList>\n"; //-------------------------------------------------------------------------------------------- // file content //-------------------------------------------------------------------------------------------- os << "\t<fileDescription>\n"; os << "\t\t<fileContent>\n"; Map<InstrumentSettings::ScanMode, UInt> file_content; for (Size i = 0; i < exp.size(); ++i) { ++file_content[exp[i].getInstrumentSettings().getScanMode()]; } if (file_content.has(InstrumentSettings::MASSSPECTRUM)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000294\" name=\"mass spectrum\" />\n"; } if (file_content.has(InstrumentSettings::MS1SPECTRUM)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000579\" name=\"MS1 spectrum\" />\n"; } if (file_content.has(InstrumentSettings::MSNSPECTRUM)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000580\" name=\"MSn spectrum\" />\n"; } if (file_content.has(InstrumentSettings::SIM)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000582\" name=\"SIM spectrum\" />\n"; } if (file_content.has(InstrumentSettings::SRM)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000583\" name=\"SRM spectrum\" />\n"; } if (file_content.has(InstrumentSettings::CRM)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000581\" name=\"CRM spectrum\" />\n"; } if (file_content.has(InstrumentSettings::PRECURSOR)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000341\" name=\"precursor ion spectrum\" />\n"; } if (file_content.has(InstrumentSettings::CNG)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000325\" 
name=\"constant neutral gain spectrum\" />\n"; } if (file_content.has(InstrumentSettings::CNL)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000326\" name=\"constant neutral loss spectrum\" />\n"; } if (file_content.has(InstrumentSettings::EMR)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000804\" name=\"electromagnetic radiation spectrum\" />\n"; } if (file_content.has(InstrumentSettings::EMISSION)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000805\" name=\"emission spectrum\" />\n"; } if (file_content.has(InstrumentSettings::ABSORBTION)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000806\" name=\"absorption spectrum\" />\n"; } if (file_content.has(InstrumentSettings::EMC)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000789\" name=\"enhanced multiply charged spectrum\" />\n"; } if (file_content.has(InstrumentSettings::TDF)) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000789\" name=\"time-delayed fragmentation spectrum\" />\n"; } if (file_content.has(InstrumentSettings::UNKNOWN) || file_content.empty()) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000294\" name=\"mass spectrum\" />\n"; } // writeUserParam_(os, exp, 3, "/mzML/fileDescription/fileContent/cvParam/@accession", validator); os << "\t\t</fileContent>\n"; //-------------------------------------------------------------------------------------------- // source file list //-------------------------------------------------------------------------------------------- //find out how many spectra source files need to be written UInt sf_sp_count = 0; for (Size i = 0; i < exp.size(); ++i) { if (exp[i].getSourceFile() != SourceFile()) { ++sf_sp_count; } } if (exp.getSourceFiles().size() > 0 || sf_sp_count > 0) { os << "\t\t<sourceFileList count=\"" << exp.getSourceFiles().size() + sf_sp_count << "\">\n"; //write source file of run for (Size i = 0; i < exp.getSourceFiles().size(); ++i) { writeSourceFile_(os, String("sf_ru_") + String(i), 
exp.getSourceFiles()[i], validator); } // write source files of spectra if (sf_sp_count > 0) { const SourceFile sf_default; for (Size i = 0; i < exp.size(); ++i) { if (exp[i].getSourceFile() != sf_default) { writeSourceFile_(os, String("sf_sp_") + i, exp[i].getSourceFile(), validator); } } } os << "\t\t</sourceFileList>\n"; } //-------------------------------------------------------------------------------------------- // contacts //-------------------------------------------------------------------------------------------- for (Size i = 0; i < exp.getContacts().size(); ++i) { const ContactPerson& cp = exp.getContacts()[i]; os << "\t\t<contact>\n"; os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000586\" name=\"contact name\" value=\"" << writeXMLEscape(cp.getLastName()) << ", " << writeXMLEscape(cp.getFirstName()) << "\" />\n"; os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000590\" name=\"contact affiliation\" value=\"" << writeXMLEscape(cp.getInstitution()) << "\" />\n"; if (cp.getAddress() != "") { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000587\" name=\"contact address\" value=\"" << writeXMLEscape(cp.getAddress()) << "\" />\n"; } if (cp.getURL() != "") { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000588\" name=\"contact URL\" value=\"" << writeXMLEscape(cp.getURL()) << "\" />\n"; } if (cp.getEmail() != "") { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000589\" name=\"contact email\" value=\"" << writeXMLEscape(cp.getEmail()) << "\" />\n"; } if (cp.getContactInfo() != "") { os << "\t\t\t<userParam name=\"contact_info\" type=\"xsd:string\" value=\"" << writeXMLEscape(cp.getContactInfo()) << "\" />\n"; } writeUserParam_(os, cp, 3, "/mzML/fileDescription/contact/cvParam/@accession", validator); os << "\t\t</contact>\n"; } os << "\t</fileDescription>\n"; //-------------------------------------------------------------------------------------------- // sample 
//-------------------------------------------------------------------------------------------- const Sample& sa = exp.getSample(); os << "\t<sampleList count=\"1\">\n"; os << "\t\t<sample id=\"sa_0\" name=\"" << writeXMLEscape(sa.getName()) << "\">\n"; if (sa.getNumber() != "") { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000001\" name=\"sample number\" value=\"" << writeXMLEscape(sa.getNumber()) << "\" />\n"; } os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000004\" name=\"sample mass\" value=\"" << sa.getMass() << "\" unitAccession=\"UO:0000021\" unitName=\"gram\" unitCvRef=\"UO\" />\n"; os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000005\" name=\"sample volume\" value=\"" << sa.getVolume() << "\" unitAccession=\"UO:0000098\" unitName=\"milliliter\" unitCvRef=\"UO\" />\n"; os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000006\" name=\"sample concentration\" value=\"" << sa.getConcentration() << "\" unitAccession=\"UO:0000175\" unitName=\"gram per liter\" unitCvRef=\"UO\" />\n"; if (sa.getState() == Sample::EMULSION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000047\" name=\"emulsion\" />\n"; } else if (sa.getState() == Sample::GAS) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000048\" name=\"gas\" />\n"; } else if (sa.getState() == Sample::LIQUID) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000049\" name=\"liquid\" />\n"; } else if (sa.getState() == Sample::SOLID) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000050\" name=\"solid\" />\n"; } else if (sa.getState() == Sample::SOLUTION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000051\" name=\"solution\" />\n"; } else if (sa.getState() == Sample::SUSPENSION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000052\" name=\"suspension\" />\n"; } if (sa.getComment() != "") { os << "\t\t\t<userParam name=\"comment\" type=\"xsd:string\" value=\"" << writeXMLEscape(sa.getComment()) << "\" />\n"; } writeUserParam_(os, sa, 3, 
"/mzML/sampleList/sample/cvParam/@accession", validator); os << "\t\t</sample>\n"; os << "\t</sampleList>\n"; //-------------------------------------------------------------------------------------------- // software //-------------------------------------------------------------------------------------------- // create a list of all different data processings Size num_software(2); // instrument software is always written for (Size s = 0; s < exp.size(); ++s) { if (find(dps.begin(), dps.end(), exp[s].getDataProcessing()) == dps.end()) { dps.push_back(exp[s].getDataProcessing()); num_software += exp[s].getDataProcessing().size(); } } for (Size s = 0; s < exp.getChromatograms().size(); ++s) { if (find(dps.begin(), dps.end(), exp.getChromatograms()[s].getDataProcessing()) == dps.end()) { dps.push_back(exp.getChromatograms()[s].getDataProcessing()); num_software += exp.getChromatograms()[s].getDataProcessing().size(); } } // count binary data array software Size num_bi_software(0); for (Size s = 0; s < exp.size(); ++s) { for (Size m = 0; m < exp[s].getFloatDataArrays().size(); ++m) { for (Size i = 0; i < exp[s].getFloatDataArrays()[m].getDataProcessing().size(); ++i) { ++num_bi_software; } } } os << "\t<softwareList count=\"" << num_software + num_bi_software << "\">\n"; //write instrument software writeSoftware_(os, "so_in_0", exp.getInstrument().getSoftware(), validator); //write fallback software writeSoftware_(os, "so_default", Software(), validator); // write the software of the dps for (Size s1 = 0; s1 != dps.size(); ++s1) { for (Size s2 = 0; s2 != dps[s1].size(); ++s2) { writeSoftware_(os, String("so_dp_sp_") + s1 + "_pm_" + s2, dps[s1][s2].getSoftware(), validator); } } //write data processing (for each binary data array) for (Size s = 0; s < exp.size(); ++s) { for (Size m = 0; m < exp[s].getFloatDataArrays().size(); ++m) { for (Size i = 0; i < exp[s].getFloatDataArrays()[m].getDataProcessing().size(); ++i) { writeSoftware_(os, String("so_dp_sp_") + s + "_bi_" 
+ m + "_pm_" + i, exp[s].getFloatDataArrays()[m].getDataProcessing()[i].getSoftware(), validator); } } } os << "\t</softwareList>\n"; //-------------------------------------------------------------------------------------------- // instrument configuration (enclosing ion source, mass analyzer and detector) //-------------------------------------------------------------------------------------------- const Instrument& in = exp.getInstrument(); os << "\t<instrumentConfigurationList count=\"1\">\n"; os << "\t\t<instrumentConfiguration id=\"ic_0\">\n"; ControlledVocabulary::CVTerm in_term = getChildWithName_("MS:1000031", in.getName()); if (in_term.id != "") { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"" << in_term.id << "\" name=\"" << writeXMLEscape(in_term.name) << "\" />\n"; } else { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000031\" name=\"instrument model\" />\n"; } if (in.getCustomizations() != "") { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000032\" name=\"customization\" value=\"" << writeXMLEscape(in.getCustomizations()) << "\" />\n"; } //ion optics if (in.getIonOptics() == Instrument::MAGNETIC_DEFLECTION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000221\" name=\"magnetic deflection\" />\n"; } else if (in.getIonOptics() == Instrument::DELAYED_EXTRACTION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000246\" name=\"delayed extraction\" />\n"; } else if (in.getIonOptics() == Instrument::COLLISION_QUADRUPOLE) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000275\" name=\"collision quadrupole\" />\n"; } else if (in.getIonOptics() == Instrument::SELECTED_ION_FLOW_TUBE) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000281\" name=\"selected ion flow tube\" />\n"; } else if (in.getIonOptics() == Instrument::TIME_LAG_FOCUSING) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000286\" name=\"time lag focusing\" />\n"; } else if (in.getIonOptics() == Instrument::REFLECTRON) { os << "\t\t\t<cvParam cvRef=\"MS\" 
accession=\"MS:1000300\" name=\"reflectron\" />\n"; } else if (in.getIonOptics() == Instrument::EINZEL_LENS) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000307\" name=\"einzel lens\" />\n"; } else if (in.getIonOptics() == Instrument::FIRST_STABILITY_REGION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000309\" name=\"first stability region\" />\n"; } else if (in.getIonOptics() == Instrument::FRINGING_FIELD) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000310\" name=\"fringing field\" />\n"; } else if (in.getIonOptics() == Instrument::KINETIC_ENERGY_ANALYZER) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000311\" name=\"kinetic energy analyzer\" />\n"; } else if (in.getIonOptics() == Instrument::STATIC_FIELD) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000320\" name=\"static field\" />\n"; } writeUserParam_(os, in, 3, "/mzML/instrumentConfigurationList/instrumentConfiguration/cvParam/@accession", validator); Size component_count = in.getIonSources().size() + in.getMassAnalyzers().size() + in.getIonDetectors().size(); if (component_count != 0) { os << "\t\t\t<componentList count=\"" << (std::max)((Size)3, component_count) << "\">\n"; //-------------------------------------------------------------------------------------------- // ion source //-------------------------------------------------------------------------------------------- for (Size i = 0; i < in.getIonSources().size(); ++i) { const IonSource& so = in.getIonSources()[i]; os << "\t\t\t\t<source order=\"" << so.getOrder() << "\">\n"; if (so.getInletType() == IonSource::CONTINUOUSFLOWFASTATOMBOMBARDMENT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000055\" name=\"continuous flow fast atom bombardment\" />\n"; } else if (so.getInletType() == IonSource::DIRECT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000056\" name=\"direct inlet\" />\n"; } else if (so.getInletType() == IonSource::ELECTROSPRAYINLET) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" 
accession=\"MS:1000057\" name=\"electrospray inlet\" />\n"; } else if (so.getInletType() == IonSource::FLOWINJECTIONANALYSIS) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000058\" name=\"flow injection analysis\" />\n"; } else if (so.getInletType() == IonSource::INDUCTIVELYCOUPLEDPLASMA) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000059\" name=\"inductively coupled plasma\" />\n"; } else if (so.getInletType() == IonSource::INFUSION) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000060\" name=\"infusion\" />\n"; } else if (so.getInletType() == IonSource::JETSEPARATOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000061\" name=\"jet separator\" />\n"; } else if (so.getInletType() == IonSource::MEMBRANESEPARATOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000062\" name=\"membrane separator\" />\n"; } else if (so.getInletType() == IonSource::MOVINGBELT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000063\" name=\"moving belt\" />\n"; } else if (so.getInletType() == IonSource::MOVINGWIRE) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000064\" name=\"moving wire\" />\n"; } else if (so.getInletType() == IonSource::OPENSPLIT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000065\" name=\"open split\" />\n"; } else if (so.getInletType() == IonSource::PARTICLEBEAM) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000066\" name=\"particle beam\" />\n"; } else if (so.getInletType() == IonSource::RESERVOIR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000067\" name=\"reservoir\" />\n"; } else if (so.getInletType() == IonSource::SEPTUM) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000068\" name=\"septum\" />\n"; } else if (so.getInletType() == IonSource::THERMOSPRAYINLET) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000069\" name=\"thermospray inlet\" />\n"; } else if (so.getInletType() == IonSource::BATCH) { os << "\t\t\t\t\t<cvParam 
cvRef=\"MS\" accession=\"MS:1000248\" name=\"direct insertion probe\" />\n"; } else if (so.getInletType() == IonSource::CHROMATOGRAPHY) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000249\" name=\"direct liquid introduction\" />\n"; } else if (so.getInletType() == IonSource::MEMBRANE) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000396\" name=\"membrane inlet\" />\n"; } else if (so.getInletType() == IonSource::NANOSPRAY) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000485\" name=\"nanospray inlet\" />\n"; } if (so.getIonizationMethod() == IonSource::APCI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000070\" name=\"atmospheric pressure chemical ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::CI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000071\" name=\"chemical ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::ESI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000073\" name=\"electrospray ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::FAB) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000074\" name=\"fast atom bombardment ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::MALDI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000075\" name=\"matrix-assisted laser desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::MPI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000227\" name=\"multiphoton ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::AP_MALDI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000239\" name=\"atmospheric pressure matrix-assisted laser desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::API) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000240\" name=\"atmospheric pressure ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::DI) { os << 
"\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000247\" name=\"desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::FA) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000255\" name=\"flowing afterglow\" />\n"; } else if (so.getIonizationMethod() == IonSource::FD) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000257\" name=\"field desorption\" />\n"; } else if (so.getIonizationMethod() == IonSource::FI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000258\" name=\"field ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::GD_MS) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000259\" name=\"glow discharge ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::NICI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000271\" name=\"Negative ion chemical ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::NRMS) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000272\" name=\"neutralization reionization mass spectrometry\" />\n"; } else if (so.getIonizationMethod() == IonSource::PI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000273\" name=\"photoionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::PYMS) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000274\" name=\"pyrolysis mass spectrometry\" />\n"; } else if (so.getIonizationMethod() == IonSource::REMPI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000276\" name=\"resonance enhanced multiphoton ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SELDI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000278\" name=\"surface enhanced laser desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SEND) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000279\" name=\"surface enhanced neat desorption\" />\n"; } else if (so.getIonizationMethod() == IonSource::AI) { os << 
"\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000380\" name=\"adiabatic ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::ASI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000381\" name=\"associative ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::APPI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000382\" name=\"atmospheric pressure photoionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::AD) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000383\" name=\"autodetachment\" />\n"; } else if (so.getIonizationMethod() == IonSource::AUI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000384\" name=\"autoionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::CEI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000385\" name=\"charge exchange ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::CHEMI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000386\" name=\"chemi-ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SILI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000387\" name=\"desorption/ionization on silicon\" />\n"; } else if (so.getIonizationMethod() == IonSource::DISSI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000388\" name=\"dissociative ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::EI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000389\" name=\"electron ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::LD) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000393\" name=\"laser desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::LSI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000395\" name=\"liquid secondary ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::MESI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000397\" 
name=\"microelectrospray\" />\n"; } else if (so.getIonizationMethod() == IonSource::NESI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000398\" name=\"nanoelectrospray\" />\n"; } else if (so.getIonizationMethod() == IonSource::PEI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000399\" name=\"penning ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::PD) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000400\" name=\"plasma desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000402\" name=\"secondary ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SOI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000403\" name=\"soft ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SPI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000404\" name=\"spark ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SALDI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000405\" name=\"surface-assisted laser desorption ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::SUI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000406\" name=\"surface ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::TI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000407\" name=\"thermal ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::VI) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000408\" name=\"vertical ionization\" />\n"; } else if (so.getIonizationMethod() == IonSource::FIB) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000446\" name=\"fast ion bombardment\" />\n"; } else if (so.getIonizationMethod() == IonSource::IONMETHODNULL) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000008\" name=\"ionization type\" />\n"; } writeUserParam_(os, so, 5, 
"/mzML/instrumentConfigurationList/instrumentConfiguration/componentList/source/cvParam/@accession", validator); os << "\t\t\t\t</source>\n"; } //FORCED if (component_count < 3 && in.getIonSources().empty()) { os << "\t\t\t\t<source order=\"1234\">\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000446\" name=\"fast ion bombardment\" />\n"; os << "\t\t\t\t\t<userParam name=\"warning\" type=\"xsd:string\" value=\"invented ion source, to fulfill mzML schema\" />\n"; os << "\t\t\t\t</source>\n"; } //-------------------------------------------------------------------------------------------- // mass analyzer //-------------------------------------------------------------------------------------------- for (Size i = 0; i < in.getMassAnalyzers().size(); ++i) { const MassAnalyzer& ma = in.getMassAnalyzers()[i]; os << "\t\t\t\t<analyzer order=\"" << ma.getOrder() << "\">\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000014\" name=\"accuracy\" value=\"" << ma.getAccuracy() << "\" unitAccession=\"UO:0000169\" unitName=\"parts per million\" unitCvRef=\"UO\" />\n"; // @todo: the parameters below are instrument specific and should not be written every time os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000022\" name=\"TOF Total Path Length\" value=\"" << ma.getTOFTotalPathLength() << "\" unitAccession=\"UO:0000008\" unitName=\"meter\" unitCvRef=\"UO\" />\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000024\" name=\"final MS exponent\" value=\"" << ma.getFinalMSExponent() << "\" />\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000025\" name=\"magnetic field strength\" value=\"" << ma.getMagneticFieldStrength() << "\" unitAccession=\"UO:0000228\" unitName=\"tesla\" unitCvRef=\"UO\" />\n"; if (ma.getReflectronState() == MassAnalyzer::ON) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000106\" name=\"reflectron on\" />\n"; } else if (ma.getReflectronState() == MassAnalyzer::OFF) { os << "\t\t\t\t\t<cvParam 
cvRef=\"MS\" accession=\"MS:1000105\" name=\"reflectron off\" />\n"; } if (ma.getType() == MassAnalyzer::FOURIERTRANSFORM) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000079\" name=\"fourier transform ion cyclotron resonance mass spectrometer\" />\n"; } else if (ma.getType() == MassAnalyzer::SECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000080\" name=\"magnetic sector\" />\n"; } else if (ma.getType() == MassAnalyzer::QUADRUPOLE) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000081\" name=\"quadrupole\" />\n"; } else if (ma.getType() == MassAnalyzer::TOF) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000084\" name=\"time-of-flight\" />\n"; } else if (ma.getType() == MassAnalyzer::ESA) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000254\" name=\"electrostatic energy analyzer\" />\n"; } else if (ma.getType() == MassAnalyzer::IT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000264\" name=\"ion trap\" />\n"; } else if (ma.getType() == MassAnalyzer::SWIFT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000284\" name=\"stored waveform inverse fourier transform\" />\n"; } else if (ma.getType() == MassAnalyzer::CYCLOTRON) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000288\" name=\"cyclotron\" />\n"; } else if (ma.getType() == MassAnalyzer::ORBITRAP) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000484\" name=\"orbitrap\" />\n"; } else if (ma.getType() == MassAnalyzer::AXIALEJECTIONLINEARIONTRAP) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000078\" name=\"axial ejection linear ion trap\" />\n"; } else if (ma.getType() == MassAnalyzer::PAULIONTRAP) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000082\" name=\"quadrupole ion trap\" />\n"; } else if (ma.getType() == MassAnalyzer::RADIALEJECTIONLINEARIONTRAP) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000083\" name=\"radial ejection linear ion trap\" />\n"; } else if (ma.getType() 
== MassAnalyzer::LIT) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000291\" name=\"linear ion trap\" />\n"; } else if (ma.getType() == MassAnalyzer::ANALYZERNULL) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000443\" name=\"mass analyzer type\" />\n"; } writeUserParam_(os, ma, 5, "/mzML/instrumentConfigurationList/instrumentConfiguration/componentList/analyzer/cvParam/@accession", validator); os << "\t\t\t\t</analyzer>\n"; } //FORCED if (component_count < 3 && in.getMassAnalyzers().empty()) { os << "\t\t\t\t<analyzer order=\"1234\">\n"; os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000288\" name=\"cyclotron\" />\n"; os << "\t\t\t\t\t<userParam name=\"warning\" type=\"xsd:string\" value=\"invented mass analyzer, to fulfill mzML schema\" />\n"; os << "\t\t\t\t</analyzer>\n"; } //-------------------------------------------------------------------------------------------- // ion detector //-------------------------------------------------------------------------------------------- for (Size i = 0; i < in.getIonDetectors().size(); ++i) { const IonDetector& id = in.getIonDetectors()[i]; os << "\t\t\t\t<detector order=\"" << id.getOrder() << "\">\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000028\" name=\"detector resolution\" value=\"" << id.getResolution() << "\" />\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000029\" name=\"sampling frequency\" value=\"" << id.getADCSamplingFrequency() << "\" unitAccession=\"UO:0000106\" unitName=\"hertz\" unitCvRef=\"UO\" />\n"; if (id.getAcquisitionMode() == IonDetector::ADC) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000117\" name=\"analog-digital converter\" />\n"; } else if (id.getAcquisitionMode() == IonDetector::PULSECOUNTING) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000118\" name=\"pulse counting\" />\n"; } else if (id.getAcquisitionMode() == IonDetector::TDC) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000119\" 
name=\"time-digital converter\" />\n"; } else if (id.getAcquisitionMode() == IonDetector::TRANSIENTRECORDER) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000120\" name=\"transient recorder\" />\n"; } if (id.getType() == IonDetector::CHANNELTRON) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000107\" name=\"channeltron\" />\n"; } else if (id.getType() == IonDetector::DALYDETECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000110\" name=\"daly detector\" />\n"; } else if (id.getType() == IonDetector::FARADAYCUP) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000112\" name=\"faraday cup\" />\n"; } else if (id.getType() == IonDetector::MICROCHANNELPLATEDETECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000114\" name=\"microchannel plate detector\" />\n"; } else if (id.getType() == IonDetector::MULTICOLLECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000115\" name=\"multi-collector\" />\n"; } else if (id.getType() == IonDetector::PHOTOMULTIPLIER) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000116\" name=\"photomultiplier\" />\n"; } else if (id.getType() == IonDetector::ELECTRONMULTIPLIER) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000253\" name=\"electron multiplier\" />\n"; } else if (id.getType() == IonDetector::ARRAYDETECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000345\" name=\"array detector\" />\n"; } else if (id.getType() == IonDetector::CONVERSIONDYNODE) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000346\" name=\"conversion dynode\" />\n"; } else if (id.getType() == IonDetector::DYNODE) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000347\" name=\"dynode\" />\n"; } else if (id.getType() == IonDetector::FOCALPLANECOLLECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000348\" name=\"focal plane collector\" />\n"; } else if (id.getType() == IonDetector::IONTOPHOTONDETECTOR) { os << "\t\t\t\t\t<cvParam 
cvRef=\"MS\" accession=\"MS:1000349\" name=\"ion-to-photon detector\" />\n"; } else if (id.getType() == IonDetector::POINTCOLLECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000350\" name=\"point collector\" />\n"; } else if (id.getType() == IonDetector::POSTACCELERATIONDETECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000351\" name=\"postacceleration detector\" />\n"; } else if (id.getType() == IonDetector::PHOTODIODEARRAYDETECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000621\" name=\"photodiode array detector\" />\n"; } else if (id.getType() == IonDetector::INDUCTIVEDETECTOR) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000624\" name=\"inductive detector\" />\n"; } else if (id.getType() == IonDetector::CONVERSIONDYNODEELECTRONMULTIPLIER) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000108\" name=\"conversion dynode electron multiplier\" />\n"; } else if (id.getType() == IonDetector::CONVERSIONDYNODEPHOTOMULTIPLIER) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000109\" name=\"conversion dynode photomultiplier\" />\n"; } else if (id.getType() == IonDetector::ELECTRONMULTIPLIERTUBE) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000111\" name=\"electron multiplier tube\" />\n"; } else if (id.getType() == IonDetector::FOCALPLANEARRAY) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000113\" name=\"focal plane array\" />\n"; } else if (id.getType() == IonDetector::TYPENULL) { os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000026\" name=\"detector type\" />\n"; } writeUserParam_(os, id, 5, "/mzML/instrumentConfigurationList/instrumentConfiguration/componentList/detector/cvParam/@accession", validator); os << "\t\t\t\t</detector>\n"; } //FORCED if (component_count < 3 && in.getIonDetectors().empty()) { os << "\t\t\t\t<detector order=\"1234\">\n"; os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000107\" name=\"channeltron\" />\n"; os << 
"\t\t\t\t\t<userParam name=\"warning\" type=\"xsd:string\" value=\"invented ion detector, to fulfill mzML schema\" />\n"; os << "\t\t\t\t</detector>\n"; } os << "\t\t\t</componentList>\n"; } os << "\t\t\t<softwareRef ref=\"so_in_0\" />\n"; os << "\t\t</instrumentConfiguration>\n"; os << "\t</instrumentConfigurationList>\n"; //-------------------------------------------------------------------------------------------- // data processing //-------------------------------------------------------------------------------------------- // count number of float data array dps Size num_bi_dps(0); for (Size s = 0; s < exp.size(); ++s) { for (Size m = 0; m < exp[s].getFloatDataArrays().size(); ++m) { ++num_bi_dps; } } os << "\t<dataProcessingList count=\"" << (std::max)((Size)1, dps.size() + num_bi_dps) << "\">\n"; //default (first spectrum data or fictional data) if (exp.empty()) { std::vector<DataProcessing> dummy; writeDataProcessing_(os, "dp_sp_0", dummy, validator); } for (Size s = 0; s < dps.size(); ++s) { writeDataProcessing_(os, String("dp_sp_") + s, dps[s], validator); } //for each binary data array for (Size s = 0; s < exp.size(); ++s) { for (Size m = 0; m < exp[s].getFloatDataArrays().size(); ++m) { writeDataProcessing_(os, String("dp_sp_") + s + "_bi_" + m, exp[s].getFloatDataArrays()[m].getDataProcessing(), validator); } } os << "\t</dataProcessingList>\n"; //-------------------------------------------------------------------------------------------- // acquisitionSettings //-------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------- // run //-------------------------------------------------------------------------------------------- os << "\t<run id=\"ru_0\" defaultInstrumentConfigurationRef=\"ic_0\" sampleRef=\"sa_0\""; if (exp.getDateTime().isValid()) { os << " startTimeStamp=\"" << exp.getDateTime().get().substitute(' ', 'T') << 
"\"";
      }
      if (exp.getSourceFiles().size() > 0)
      {
        os << " defaultSourceFileRef=\"sf_ru_0\"";
      }
      os << ">\n";
      //run attributes
      if (exp.getFractionIdentifier() != "")
      {
        os << "\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000858\" name=\"fraction identifier\" value=\"" << exp.getFractionIdentifier() << "\" />\n";
      }
      writeUserParam_(os, exp, 2, "/mzML/run/cvParam/@accession", validator);
    }

    // Serializes one spectrum as an mzML <spectrum> element.
    // @param os              output stream (positioned inside <spectrumList>)
    // @param spec            the spectrum to write
    // @param s               zero-based index of the spectrum within the run
    // @param validator       CV validator handed through to writeUserParam_
    // @param renew_native_ids if true, the native ID is replaced by "spectrum=<s>"
    // @param dps             data processing of all spectra; used to choose dataProcessingRef
    template <typename MapType>
    void MzMLHandler<MapType>::writeSpectrum_(std::ostream& os, const SpectrumType& spec, Size s, Internal::MzMLValidator& validator, bool renew_native_ids, std::vector<std::vector<DataProcessing> >& dps)
    {
      //native id
      String native_id = spec.getNativeID();
      if (renew_native_ids)
      {
        native_id = String("spectrum=") + s;
      }
      // record the byte offset of the '<spectrum' tag for the index list written later
      long offset = os.tellp();
      spectra_offsets.push_back(make_pair(native_id, offset + 3));
      // IMPORTANT make sure the offset (above) corresponds to the start of the <spectrum tag
      os << "\t\t\t<spectrum id=\"" << writeXMLEscape(native_id) << "\" index=\"" << s << "\" defaultArrayLength=\"" << spec.size() << "\"";
      if (spec.getSourceFile() != SourceFile())
      {
        os << " sourceFileRef=\"sf_sp_" << s << "\"";
      }
      //the data processing info of the first spectrum is the default
      //if (s==0 || spec.getDataProcessing()!=exp[0].getDataProcessing())
      if (s == 0 || spec.getDataProcessing() != dps[0])
      {
        Size dp_ref_num = s;
        if (s != 0)
        {
          // reuse an earlier dataProcessing entry if this spectrum's matches it
          for (Size i = 0; i < dps.size(); ++i)
          {
            if (spec.getDataProcessing() == dps[i])
            {
              dp_ref_num = i;
              break;
            }
          }
        }
        os << " dataProcessingRef=\"dp_sp_" << dp_ref_num << "\"";
      }
      os << ">\n";
      //spectrum representation
      if (spec.getType() == SpectrumSettings::PEAKS)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000127\" name=\"centroid spectrum\" />\n";
      }
      else if (spec.getType() == SpectrumSettings::RAWDATA)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000128\" name=\"profile spectrum\" />\n";
      }
      else
      {
        // unknown representation: fall back to the generic parent CV term
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000525\" name=\"spectrum representation\" />\n";
      }
      //spectrum attributes
      if (spec.getMSLevel() != 0)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000511\" name=\"ms level\" value=\"" << spec.getMSLevel() << "\" />\n";
      }
      if (spec.getInstrumentSettings().getZoomScan())
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000497\" name=\"zoom scan\" />\n";
      }
      //spectrum type
      if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::MASSSPECTRUM)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000294\" name=\"mass spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::MS1SPECTRUM)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000579\" name=\"MS1 spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::MSNSPECTRUM)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000580\" name=\"MSn spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::SIM)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000582\" name=\"SIM spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::SRM)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000583\" name=\"SRM spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::CRM)
      {
        os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000581\" name=\"CRM spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::PRECURSOR)
      {
        // NOTE(review): the following branches use 3-tab indentation in the emitted XML
        // while the branches above use 4 tabs — cosmetic only, mzML readers ignore it
        os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000341\" name=\"precursor ion spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::CNG)
      {
        os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000325\" name=\"constant neutral gain spectrum\" />\n";
      }
      else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::CNL)
      {
        os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000326\" name=\"constant neutral loss spectrum\" />\n";
      }
      else if
(spec.getInstrumentSettings().getScanMode() == InstrumentSettings::EMR) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000804\" name=\"electromagnetic radiation spectrum\" />\n"; } else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::EMISSION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000805\" name=\"emission spectrum\" />\n"; } else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::ABSORBTION) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000806\" name=\"absorption spectrum\" />\n"; } else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::EMC) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000789\" name=\"enhanced multiply charged spectrum\" />\n"; } else if (spec.getInstrumentSettings().getScanMode() == InstrumentSettings::TDF) { os << "\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000789\" name=\"time-delayed fragmentation spectrum\" />\n"; } else //FORCED { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000294\" name=\"mass spectrum\" />\n"; } //scan polarity if (spec.getInstrumentSettings().getPolarity() == IonSource::NEGATIVE) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000129\" name=\"negative scan\" />\n"; } else if (spec.getInstrumentSettings().getPolarity() == IonSource::POSITIVE) { os << "\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000130\" name=\"positive scan\" />\n"; } writeUserParam_(os, spec, 4, "/mzML/run/spectrumList/spectrum/cvParam/@accession", validator); //-------------------------------------------------------------------------------------------- //scan list //-------------------------------------------------------------------------------------------- os << "\t\t\t\t<scanList count=\"" << (std::max)((Size)1, spec.getAcquisitionInfo().size()) << "\">\n"; ControlledVocabulary::CVTerm ai_term = getChildWithName_("MS:1000570", spec.getAcquisitionInfo().getMethodOfCombination()); if (ai_term.id != "") { os << "\t\t\t\t\t<cvParam 
cvRef=\"MS\" accession=\"" << ai_term.id << "\" name=\"" << ai_term.name << "\" />\n";
      }
      else
      {
        // no matching child of 'spectra combination' (MS:1000570) => 'no combination'
        os << "\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000795\" name=\"no combination\" />\n";
      }
      writeUserParam_(os, spec.getAcquisitionInfo(), 5, "/mzML/run/spectrumList/spectrum/scanList/cvParam/@accession", validator);
      //--------------------------------------------------------------------------------------------
      //scan
      //--------------------------------------------------------------------------------------------
      for (Size j = 0; j < spec.getAcquisitionInfo().size(); ++j)
      {
        const Acquisition& ac = spec.getAcquisitionInfo()[j];
        os << "\t\t\t\t\t<scan ";
        if (ac.getIdentifier() != "")
          os << "externalSpectrumID=\"" << ac.getIdentifier() << "\"";
        os << ">\n";
        // the retention time is attached to the first scan only
        if (j == 0)
        {
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000016\" name=\"scan start time\" value=\"" << spec.getRT() << "\" unitAccession=\"UO:0000010\" unitName=\"second\" unitCvRef=\"UO\" />\n";
        }
        writeUserParam_(os, ac, 6, "/mzML/run/spectrumList/spectrum/scanList/scan/cvParam/@accession", validator);
        //scan windows
        if (j == 0 && spec.getInstrumentSettings().getScanWindows().size() != 0)
        {
          os << "\t\t\t\t\t\t<scanWindowList count=\"" << spec.getInstrumentSettings().getScanWindows().size() << "\">\n";
          for (Size k = 0; k < spec.getInstrumentSettings().getScanWindows().size(); ++k)
          {
            os << "\t\t\t\t\t\t\t<scanWindow>\n";
            os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000501\" name=\"scan window lower limit\" value=\"" << spec.getInstrumentSettings().getScanWindows()[k].begin << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
            os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000500\" name=\"scan window upper limit\" value=\"" << spec.getInstrumentSettings().getScanWindows()[k].end << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
            writeUserParam_(os, spec.getInstrumentSettings().getScanWindows()[k], 8, "/mzML/run/spectrumList/spectrum/scanList/scan/scanWindowList/scanWindow/cvParam/@accession", validator);
            os << "\t\t\t\t\t\t\t</scanWindow>\n";
          }
          os << "\t\t\t\t\t\t</scanWindowList>\n";
        }
        os << "\t\t\t\t\t</scan>\n";
      }
      //fallback if we have no acquisition information (a dummy scan is created for RT and so on)
      if (spec.getAcquisitionInfo().empty())
      {
        os << "\t\t\t\t\t<scan>\n";
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000016\" name=\"scan start time\" value=\"" << spec.getRT() << "\" unitAccession=\"UO:0000010\" unitName=\"second\" unitCvRef=\"UO\" />\n";
        //scan windows
        if (spec.getInstrumentSettings().getScanWindows().size() != 0)
        {
          os << "\t\t\t\t\t\t<scanWindowList count=\"" << spec.getInstrumentSettings().getScanWindows().size() << "\">\n";
          for (Size j = 0; j < spec.getInstrumentSettings().getScanWindows().size(); ++j)
          {
            os << "\t\t\t\t\t\t\t<scanWindow>\n";
            os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000501\" name=\"scan window lower limit\" value=\"" << spec.getInstrumentSettings().getScanWindows()[j].begin << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
            os << "\t\t\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000500\" name=\"scan window upper limit\" value=\"" << spec.getInstrumentSettings().getScanWindows()[j].end << "\" unitAccession=\"MS:1000040\" unitName=\"m/z\" unitCvRef=\"MS\" />\n";
            writeUserParam_(os, spec.getInstrumentSettings().getScanWindows()[j], 8, "/mzML/run/spectrumList/spectrum/scanList/scan/scanWindowList/scanWindow/cvParam/@accession", validator);
            os << "\t\t\t\t\t\t\t</scanWindow>\n";
          }
          os << "\t\t\t\t\t\t</scanWindowList>\n";
        }
        os << "\t\t\t\t\t</scan>\n";
      }
      os << "\t\t\t\t</scanList>\n";
      //--------------------------------------------------------------------------------------------
      //precursor list
      //--------------------------------------------------------------------------------------------
      if (!spec.getPrecursors().empty())
      {
        os << "\t\t\t<precursorList count=\"" <<
spec.getPrecursors().size() << "\">\n";
        for (Size p = 0; p != spec.getPrecursors().size(); ++p)
        {
          writePrecursor_(os, spec.getPrecursors()[p], validator);
        }
        os << "\t\t\t</precursorList>\n";
      }
      //--------------------------------------------------------------------------------------------
      //product list
      //--------------------------------------------------------------------------------------------
      if (spec.getProducts().size() != 0)
      {
        os << "\t\t\t\t<productList count=\"" << spec.getProducts().size() << "\">\n";
        for (Size p = 0; p < spec.getProducts().size(); ++p)
        {
          writeProduct_(os, spec.getProducts()[p], validator);
        }
        os << "\t\t\t\t</productList>\n";
      }
      //--------------------------------------------------------------------------------------------
      //binary data array list
      //--------------------------------------------------------------------------------------------
      if (spec.size() != 0)
      {
        String encoded_string;
        // count = m/z + intensity + all extra float/string/integer arrays
        os << "\t\t\t\t<binaryDataArrayList count=\"" << (2 + spec.getFloatDataArrays().size() + spec.getStringDataArrays().size() + spec.getIntegerDataArrays().size()) << "\">\n";
        writeContainerData<SpectrumType>(os, options_, spec, "mz");
        writeContainerData<SpectrumType>(os, options_, spec, "intensity");
        String compression_term = MzMLHandlerHelper::getCompressionTerm_(options_, options_.getNumpressConfigurationIntensity(), false);
        //write float data array
        for (Size m = 0; m < spec.getFloatDataArrays().size(); ++m)
        {
          const typename SpectrumType::FloatDataArray& array = spec.getFloatDataArrays()[m];
          // widen float values to double for the 64-bit base64 encoding below
          std::vector<double> data64_to_encode(array.size());
          for (Size p = 0; p < array.size(); ++p)
            data64_to_encode[p] = array[p];
          // TODO also encode float data arrays using numpress?
          decoder_.encode(data64_to_encode, Base64::BYTEORDER_LITTLEENDIAN, encoded_string, options_.getCompression());
          String data_processing_ref_string = "";
          if (array.getDataProcessing().size() != 0)
          {
            data_processing_ref_string = String("dataProcessingRef=\"dp_sp_") + s + "_bi_" + m + "\"";
          }
          os << "\t\t\t\t\t<binaryDataArray arrayLength=\"" << array.size() << "\" encodedLength=\"" << encoded_string.size() << "\" " << data_processing_ref_string << ">\n";
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000523\" name=\"64-bit float\" />\n";
          os << "\t\t\t\t\t\t" << compression_term << "\n";
          // try to map the array name onto a child of 'binary data array' (MS:1000513)
          ControlledVocabulary::CVTerm bi_term = getChildWithName_("MS:1000513", array.getName());
          if (bi_term.id != "")
          {
            os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"" << bi_term.id << "\" name=\"" << bi_term.name << "\" />\n";
          }
          else
          {
            os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000786\" name=\"non-standard data array\" value=\"" << array.getName() << "\" />\n";
          }
          writeUserParam_(os, array, 6, "/mzML/run/spectrumList/spectrum/binaryDataArrayList/binaryDataArray/cvParam/@accession", validator);
          os << "\t\t\t\t\t\t<binary>" << encoded_string << "</binary>\n";
          os << "\t\t\t\t\t</binaryDataArray>\n";
        }
        //write integer data array
        for (Size m = 0; m < spec.getIntegerDataArrays().size(); ++m)
        {
          const typename SpectrumType::IntegerDataArray& array = spec.getIntegerDataArrays()[m];
          std::vector<Int64> data64_to_encode(array.size());
          for (Size p = 0; p < array.size(); ++p)
            data64_to_encode[p] = array[p];
          decoder_.encodeIntegers(data64_to_encode, Base64::BYTEORDER_LITTLEENDIAN, encoded_string, options_.getCompression());
          String data_processing_ref_string = "";
          if (array.getDataProcessing().size() != 0)
          {
            data_processing_ref_string = String("dataProcessingRef=\"dp_sp_") + s + "_bi_" + m + "\"";
          }
          os << "\t\t\t\t\t<binaryDataArray arrayLength=\"" << array.size() << "\" encodedLength=\"" << encoded_string.size() << "\" " << data_processing_ref_string << ">\n";
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000522\" name=\"64-bit integer\" />\n";
          os << "\t\t\t\t\t\t" << compression_term << "\n";
          ControlledVocabulary::CVTerm bi_term = getChildWithName_("MS:1000513", array.getName());
          if (bi_term.id != "")
          {
            os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"" << bi_term.id << "\" name=\"" << bi_term.name << "\" />\n";
          }
          else
          {
            os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000786\" name=\"non-standard data array\" value=\"" << array.getName() << "\" />\n";
          }
          writeUserParam_(os, array, 6, "/mzML/run/spectrumList/spectrum/binaryDataArrayList/binaryDataArray/cvParam/@accession", validator);
          os << "\t\t\t\t\t\t<binary>" << encoded_string << "</binary>\n";
          os << "\t\t\t\t\t</binaryDataArray>\n";
        }
        //write string data arrays
        for (Size m = 0; m < spec.getStringDataArrays().size(); ++m)
        {
          const typename SpectrumType::StringDataArray& array = spec.getStringDataArrays()[m];
          std::vector<String> data_to_encode;
          data_to_encode.resize(array.size());
          for (Size p = 0; p < array.size(); ++p)
            data_to_encode[p] = array[p];
          decoder_.encodeStrings(data_to_encode, encoded_string, options_.getCompression());
          String data_processing_ref_string = "";
          if (array.getDataProcessing().size() != 0)
          {
            data_processing_ref_string = String("dataProcessingRef=\"dp_sp_") + s + "_bi_" + m + "\"";
          }
          os << "\t\t\t\t\t<binaryDataArray arrayLength=\"" << array.size() << "\" encodedLength=\"" << encoded_string.size() << "\" " << data_processing_ref_string << ">\n";
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001479\" name=\"null-terminated ASCII string\" />\n";
          os << "\t\t\t\t\t\t" << compression_term << "\n";
          // string arrays have no CV child lookup; always written as non-standard
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000786\" name=\"non-standard data array\" value=\"" << array.getName() << "\" />\n";
          writeUserParam_(os, array, 6, "/mzML/run/spectrumList/spectrum/binaryDataArrayList/binaryDataArray/cvParam/@accession", validator);
          os << "\t\t\t\t\t\t<binary>" << encoded_string <<
"</binary>\n";
          os << "\t\t\t\t\t</binaryDataArray>\n";
        }
        os << "\t\t\t\t</binaryDataArrayList>\n";
      }
      os << "\t\t\t</spectrum>\n";
    }

    /**
      @brief Serialize a single chromatogram as an mzML <chromatogram> element.

      Writes the chromatogram-type cvParam, the precursor and product elements,
      and a <binaryDataArrayList> holding the Base64-encoded time and intensity
      arrays plus any additional float/integer/string meta-data arrays.

      @param os           Output stream the XML is appended to.
      @param chromatogram The chromatogram to serialize.
      @param c            Index of the chromatogram within the run (written as the 'index' attribute).
      @param validator    Validator used when writing userParams.
    */
    template <typename MapType>
    void MzMLHandler<MapType>::writeChromatogram_(std::ostream& os, const ChromatogramType& chromatogram, Size c, Internal::MzMLValidator& validator)
    {
      // Remember the byte offset of the '<chromatogram' start tag so an index
      // (e.g. for indexedmzML) can be written later.
      // NOTE(review): the '+ 6' assumes exactly six characters precede
      // '<chromatogram' on the line written below -- verify the indentation.
      long offset = os.tellp();
      chromatograms_offsets.push_back(make_pair(chromatogram.getNativeID(), offset + 6)); // TODO native id with chromatogram=?? prefix?
      // IMPORTANT make sure the offset (above) corresponds to the start of the <chromatogram tag
      os << " <chromatogram id=\"" << writeXMLEscape(chromatogram.getNativeID()) << "\" index=\"" << c << "\" defaultArrayLength=\"" << chromatogram.size() << "\">" << "\n";

      // write cvParams (chromatogram type); one cvParam per known type,
      // mapped to its PSI-MS controlled-vocabulary accession
      if (chromatogram.getChromatogramType() == ChromatogramSettings::MASS_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000810\" name=\"mass chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::TOTAL_ION_CURRENT_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000235\" name=\"total ion current chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::SELECTED_ION_CURRENT_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000627\" name=\"selected ion current chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::BASEPEAK_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000628\" name=\"basepeak chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::SELECTED_ION_MONITORING_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001472\" name=\"selected ion monitoring chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::SELECTED_REACTION_MONITORING_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001473\" name=\"selected reaction monitoring chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::ELECTROMAGNETIC_RADIATION_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000811\" name=\"electromagnetic radiation chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::ABSORPTION_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000812\" name=\"absorption chromatogram\" />\n";
      }
      else if (chromatogram.getChromatogramType() == ChromatogramSettings::EMISSION_CHROMATOGRAM)
      {
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000813\" name=\"emission chromatogram\" />\n";
      }
      else
      {
        // TODO
      }

      writePrecursor_(os, chromatogram.getPrecursor(), validator);
      writeProduct_(os, chromatogram.getProduct(), validator);

      //--------------------------------------------------------------------------------------------
      //binary data array list
      //--------------------------------------------------------------------------------------------
      String compression_term;
      String encoded_string;
      // Count = two mandatory arrays (time, intensity) plus all meta-data arrays.
      os << "\t\t\t\t<binaryDataArrayList count=\"" << (2 + chromatogram.getFloatDataArrays().size() + chromatogram.getStringDataArrays().size() + chromatogram.getIntegerDataArrays().size()) << "\">\n";
      writeContainerData<ChromatogramType>(os, options_, chromatogram, "time");
      writeContainerData<ChromatogramType>(os, options_, chromatogram, "intensity");
      compression_term = MzMLHandlerHelper::getCompressionTerm_(options_, options_.getNumpressConfigurationIntensity(), false);

      //write float data array
      for (Size m = 0; m < chromatogram.getFloatDataArrays().size(); ++m)
      {
        const typename ChromatogramType::FloatDataArray& array = chromatogram.getFloatDataArrays()[m];
        // Widen to double before Base64-encoding as 64-bit floats.
        std::vector<double> data64_to_encode(array.size());
        for (Size p = 0; p < array.size(); ++p)
          data64_to_encode[p] = array[p];
        // TODO also encode float data arrays using numpress?
        decoder_.encode(data64_to_encode, Base64::BYTEORDER_LITTLEENDIAN, encoded_string, options_.getCompression());
        String data_processing_ref_string = "";
        if (array.getDataProcessing().size() != 0)
        {
          data_processing_ref_string = String("dataProcessingRef=\"dp_sp_") + c + "_bi_" + m + "\"";
        }
        os << "\t\t\t\t\t<binaryDataArray arrayLength=\"" << array.size() << "\" encodedLength=\"" << encoded_string.size() << "\" " << data_processing_ref_string << ">\n";
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000523\" name=\"64-bit float\" />\n";
        os << "\t\t\t\t\t\t" << compression_term << "\n";
        // Use a CV child term of 'binary data array' (MS:1000513) if the array
        // name matches one; otherwise declare a non-standard data array.
        ControlledVocabulary::CVTerm bi_term = getChildWithName_("MS:1000513", array.getName());
        if (bi_term.id != "")
        {
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"" << bi_term.id << "\" name=\"" << bi_term.name << "\" />\n";
        }
        else
        {
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000786\" name=\"non-standard data array\" value=\"" << array.getName() << "\" />\n";
        }
        writeUserParam_(os, array, 6, "/mzML/run/chromatogramList/chromatogram/binaryDataArrayList/binaryDataArray/cvParam/@accession", validator);
        os << "\t\t\t\t\t\t<binary>" << encoded_string << "</binary>\n";
        os << "\t\t\t\t\t</binaryDataArray>\n";
      }

      //write integer data array
      for (Size m = 0; m < chromatogram.getIntegerDataArrays().size(); ++m)
      {
        const typename ChromatogramType::IntegerDataArray& array = chromatogram.getIntegerDataArrays()[m];
        // Widen to 64-bit integers before Base64-encoding.
        std::vector<Int64> data64_to_encode(array.size());
        for (Size p = 0; p < array.size(); ++p)
          data64_to_encode[p] = array[p];
        decoder_.encodeIntegers(data64_to_encode, Base64::BYTEORDER_LITTLEENDIAN, encoded_string, options_.getCompression());
        String data_processing_ref_string = "";
        if (array.getDataProcessing().size() != 0)
        {
          data_processing_ref_string = String("dataProcessingRef=\"dp_sp_") + c + "_bi_" + m + "\"";
        }
        os << "\t\t\t\t\t<binaryDataArray arrayLength=\"" << array.size() << "\" encodedLength=\"" << encoded_string.size() << "\" " << data_processing_ref_string << ">\n";
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000522\" name=\"64-bit integer\" />\n";
        os << "\t\t\t\t\t\t" << compression_term << "\n";
        // Same CV lookup as for the float arrays above.
        ControlledVocabulary::CVTerm bi_term = getChildWithName_("MS:1000513", array.getName());
        if (bi_term.id != "")
        {
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"" << bi_term.id << "\" name=\"" << bi_term.name << "\" />\n";
        }
        else
        {
          os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000786\" name=\"non-standard data array\" value=\"" << array.getName() << "\" />\n";
        }
        writeUserParam_(os, array, 6, "/mzML/run/chromatogramList/chromatogram/binaryDataArrayList/binaryDataArray/cvParam/@accession", validator);
        os << "\t\t\t\t\t\t<binary>" << encoded_string << "</binary>\n";
        os << "\t\t\t\t\t</binaryDataArray>\n";
      }

      //write string data arrays
      for (Size m = 0; m < chromatogram.getStringDataArrays().size(); ++m)
      {
        const typename ChromatogramType::StringDataArray& array = chromatogram.getStringDataArrays()[m];
        std::vector<String> data_to_encode;
        data_to_encode.resize(array.size());
        for (Size p = 0; p < array.size(); ++p)
          data_to_encode[p] = array[p];
        decoder_.encodeStrings(data_to_encode, encoded_string, options_.getCompression());
        String data_processing_ref_string = "";
        if (array.getDataProcessing().size() != 0)
        {
          data_processing_ref_string = String("dataProcessingRef=\"dp_sp_") + c + "_bi_" + m + "\"";
        }
        os << "\t\t\t\t\t<binaryDataArray arrayLength=\"" << array.size() << "\" encodedLength=\"" << encoded_string.size() << "\" " << data_processing_ref_string << ">\n";
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1001479\" name=\"null-terminated ASCII string\" />\n";
        os << "\t\t\t\t\t\t" << compression_term << "\n";
        // String arrays are always written as non-standard data arrays
        // (no CV lookup like the numeric arrays above).
        os << "\t\t\t\t\t\t<cvParam cvRef=\"MS\" accession=\"MS:1000786\" name=\"non-standard data array\" value=\"" << array.getName() << "\" />\n";
        writeUserParam_(os, array, 6, "/mzML/run/chromatogramList/chromatogram/binaryDataArrayList/binaryDataArray/cvParam/@accession", validator);
        os << "\t\t\t\t\t\t<binary>" << encoded_string << "</binary>\n";
        os << "\t\t\t\t\t</binaryDataArray>\n";
      }
      os << "\t\t\t\t</binaryDataArrayList>\n";
      os << "\t\t\t</chromatogram>" << "\n";
    }

  } // namespace Internal
} // namespace OpenMS

#endif
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
%
%  The format of the CompositeImage method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const Image *source_image,const CompositeOperator compose,
%        const MagickBooleanType clip_to_self,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition
%
%    o source_image: the source image.
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o clip_to_self: set to MagickTrue to limit composition to area composed.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
   Composition based on the SVG specification:

   A Composition is defined by...
      Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
      Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
                        Y = 1     for source preserved
                        Z = 1     for canvas preserved

   Conversion to transparency (then optimized)
      Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
      Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

   Where...
      Sca = Sc*Sa     normalized Source color multiplied by Source alpha
      Dca = Dc*Da     normalized Dest color multiplied by Dest alpha
      Dc' = Dca'/Da'  the desired color value for this channel.

   Da' in the following formula is 'gamma', the resulting alpha value.

   Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
   the following optimizations...
gamma = Sa+Da-Sa*Da;
      gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
      opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

   The above SVG definitions also define that Mathematical Composition
   methods should use a 'Over' blending mode for Alpha Channel.
   It however was not applied for composition modes of 'Plus', 'Minus',
   the modulus versions of 'Add' and 'Subtract'.

   Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification
       with regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates)
       is turned off (enabled by default) then mathematical compositions
       are only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

/*
  HCLComposite() converts a hue/chroma/luma triple to RGB quantum values.
  Inverse of CompositeHCL() below (same luma weights).
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;  /* hue is normalized to [0,1); scale to six hue sextants */
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));  /* second-largest RGB component */
  r=0.0;
  g=0.0;
  b=0.0;
  /* Pick the two dominant channels for the sextant containing h. */
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r=x;
      g=c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g=c;
      b=x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g=x;
      b=c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r=x;
      b=c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r=c;
      b=x;
    }
  /* Offset all channels so the result reproduces the requested luma. */
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

/*
  CompositeHCL() converts RGB quantum values to a normalized
  hue/chroma/luma triple.  Inverse of HCLComposite() above.
*/
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));  /* chroma = max-min */
  h=0.0;
  if (c == 0)
    h=0.0;  /* achromatic: hue is undefined, use 0 */
  else if (red == max)
    h=fmod((g-b)/c+6.0,6.0);
  else if (green == max)
    h=((b-r)/c)+2.0;
  else if (blue == max)
    h=((r-g)/c)+4.0;
  *hue=(h/6.0);  /* normalize hue to [0,1) */
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}

/*
  CompositeOverImage() composites source_image over image at the given
  offset (fast path for OverCompositeOp / SrcOverCompositeOp).  When
  clip_to_self is MagickTrue only the overlapped region is touched;
  otherwise pixels outside the overlay keep their color but become
  transparent.  Returns MagickTrue on success.
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  /* NOTE(review): status/progress are re-initialized here a second time;
     redundant but harmless. */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* for negative x_offset, start reading inside the source row */
        if (x_offset < 0)
          p-=x_offset*GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /* outside the overlay, the canvas keeps its color but turns
               transparent */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;  /* over-blend alpha */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=ClampToQuantum(Sc);
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
          ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      /* wrap the source row pointer when the canvas is wider than the
         source overlay */
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CompositeImage)
#endif
        proceed=SetImageProgress(image,CompositeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport MagickBooleanType CompositeImage(Image *image,
  const Image *composite,const CompositeOperator compose,
  const MagickBooleanType clip_to_self,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* work on a private clone of the source, converted to the canvas
     colorspace */
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  (void) SetImageColorspace(source_image,image->colorspace,exception);
  if
((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if (traits == 
UndefinedPixelTrait) continue; if (source_traits != UndefinedPixelTrait) SetPixelChannel(image,channel,p[i],q); else if (channel == AlphaPixelChannel) SetPixelChannel(image,channel,OpaqueAlpha,q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); 
continue; } SetPixelAlpha(image,clamp != MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; MagickRealType angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. 
*/ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* do the variable blurring of each pixel in image */ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs((double) angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { (void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1, blur.x2,blur.y1, blur.y2); (void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale* GetPixelRed(p),QuantumScale*GetPixelGreen(p)); #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if 
(sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. 
Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case LightenCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case ModulusAddCompositeOp: case ModulusSubtractCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait 
traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case BlurCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } 
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) (QuantumRange- GetPixelBlack(source_image,p)); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { pixel=Sc+Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case ModulusSubtractCompositeOp: { pixel=Sc-Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sca); break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); 
break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
TextureImage() repeatedly tiles the texture image across and down the image
%  canvas.
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image (the canvas; modified in place).
%
%    o texture: this image is the texture to layer on the background.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    A missing texture is not a hard error: simply report failure.
  */
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    The canvas is written pixel-by-pixel, so it must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone of the texture so the caller's image is never
    modified; the clone is matched to the canvas colorspace and set to tile
    virtually so out-of-range reads wrap around.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    General path: the composition operator actually blends (anything other
    than a plain copy, or an 'over' where either image carries alpha), so
    each tile placement must go through CompositeImage().
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
          texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* Each tile is composited at its grid position, shifted by the
             texture's tile offset. */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Final progress tick, then release the texture clone. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).

    Fast path: a plain channel copy suffices, so each canvas row is filled
    directly from one virtual texture row (wrapped modulo the texture
    height), in parallel across rows.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /* One full texture row, selected with the tile offset and wrapped into
       the texture's height. */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
      texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /* Clip the last tile so it does not run past the canvas edge. */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /* Copy only channels defined in both images. */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
9699.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute schedule(static, 8) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute schedule(static, 8) for (i = 0; i < _PB_N; i++) { for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute schedule(static, 8) for (j1 = 0; j1 < _PB_M; j1++) { for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
GB_unop__signum_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// This translation unit instantiates C = signum(A) for float matrices.
// GB_signumf presumably returns -1, 0, or +1 with the sign of its argument
// (NaN handling per its definition in GB.h) -- TODO confirm in GB.h.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__signum_fp32_fp32
// op(A') function: GB_unop_tran__signum_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_signumf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_signumf (x) ;

// casting (no typecast needed: A and C are both float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = GB_signumf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__signum_fp32_fp32
(
    float *Cx,                      // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body comes from GB_unop_transpose.c, driven by the macros above.

GrB_Info GB_unop_tran__signum_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kernel_template.c
#line 1 "kernel_template.c"
// GENERATED CODE --- DO NOT EDIT ---
// Code is produced by sasmodels.gen from sasmodels/models/MODEL.c

// NOTE(review): this file is a Python string template; the DEFINES/SOURCES
// placeholders below are substituted before compilation, so literal percent
// characters must not be added to this file.

#ifdef __OPENCL_VERSION__
# define USE_OPENCL
#endif

#define USE_KAHAN_SUMMATION 0

// If opencl is not available, then we are compiling a C function
// Note: if using a C++ compiler, then define kernel as extern "C"
#ifndef USE_OPENCL
// Use SAS_DOUBLE to force the use of double even for float kernels
# define SAS_DOUBLE dou ## ble
# ifdef __cplusplus
#include <cstdio>
#include <cmath>
using namespace std;
#if defined(_MSC_VER)
#include <limits>
#include <float.h>
#define kernel extern "C" __declspec( dllexport )
inline double trunc(double x) { return x>=0?floor(x):-floor(-x); }
inline double fmin(double x, double y) { return x>y ? y : x; }
inline double fmax(double x, double y) { return x<y ? y : x; }
#define isnan(x) _isnan(x)
#define isinf(x) (!_finite(x))
#define isfinite(x) _finite(x)
#define NAN (std::numeric_limits<double>::quiet_NaN()) // non-signalling NaN
#define INFINITY (std::numeric_limits<double>::infinity())
#define NEED_EXPM1
#define NEED_TGAMMA
#define NEED_ERF
#else
#define kernel extern "C"
#endif
inline void SINCOS(double angle, double &svar, double &cvar) { svar=sin(angle); cvar=cos(angle); }
# else
#include <stdio.h>
#if defined(__TINYC__)
#include <math.h>
// TODO: test isnan
inline double _isnan(double x) { return x != x; } // hope this doesn't optimize away!
#undef isnan
#define isnan(x) _isnan(x)
// Defeat the double->float conversion since we don't have tgmath
inline SAS_DOUBLE trunc(SAS_DOUBLE x) { return x>=0?floor(x):-floor(-x); }
inline SAS_DOUBLE fmin(SAS_DOUBLE x, SAS_DOUBLE y) { return x>y ? y : x; }
inline SAS_DOUBLE fmax(SAS_DOUBLE x, SAS_DOUBLE y) { return x<y ? y : x; }
#define NEED_EXPM1
#define NEED_TGAMMA
#define NEED_ERF
#else
#include <tgmath.h> // C99 type-generic math, so sin(float) => sinf
#endif
// MSVC doesn't support C99, so no need for dllexport on C99 branch
#define kernel
#define SINCOS(angle,svar,cvar) do {const double _t_=angle; svar=sin(_t_);cvar=cos(_t_);} while (0)
# endif
# define global
# define local
# define constant const
// OpenCL powr(a,b) = C99 pow(a,b), b >= 0
// OpenCL pown(a,b) = C99 pow(a,b), b integer
# define powr(a,b) pow(a,b)
# define pown(a,b) pow(a,b)
#else
# if defined(USE_SINCOS)
# define SINCOS(angle,svar,cvar) svar=sincos(angle,&cvar)
# else
# define SINCOS(angle,svar,cvar) do {const double _t_=angle; svar=sin(_t_);cvar=cos(_t_);} while (0)
# endif
#endif

// Portable expm1: used when the host math library lacks it (MSVC, tinycc).
#if defined(NEED_EXPM1)
static SAS_DOUBLE expm1(SAS_DOUBLE x_in) {
    double x = (double)x_in;  // go back to float for single precision kernels
    // Adapted from the cephes math library.
    // Copyright 1984 - 1992 by Stephen L. Moshier
    if (x != x || x == 0.0) {
       return x; // NaN and +/- 0
    } else if (x < -0.5 || x > 0.5) {
       return exp(x) - 1.0;
    } else {
       // rational polynomial approximation near zero, where exp(x)-1.0
       // would lose precision to cancellation
       const double xsq = x*x;
       const double p = (((
          +1.2617719307481059087798E-4)*xsq
          +3.0299440770744196129956E-2)*xsq
          +9.9999999999999999991025E-1);
       const double q = ((((
          +3.0019850513866445504159E-6)*xsq
          +2.5244834034968410419224E-3)*xsq
          +2.2726554820815502876593E-1)*xsq
          +2.0000000000000000000897E0);
       double r = x * p;
       r =  r / (q - r);
       return r+r;
     }
}
#endif

// Standard mathematical constants:
//   M_E, M_LOG2E, M_LOG10E, M_LN2, M_LN10, M_PI, M_PI_2=pi/2, M_PI_4=pi/4,
//   M_1_PI=1/pi, M_2_PI=2/pi, M_2_SQRTPI=2/sqrt(pi), SQRT2, SQRT1_2=sqrt(1/2)
// OpenCL defines M_constant_F for float constants, and nothing if double
// is not enabled on the card, which is why these constants may be missing
#ifndef M_PI
#  define M_PI 3.141592653589793
#endif
#ifndef M_PI_2
#  define M_PI_2 1.570796326794897
#endif
#ifndef M_PI_4
#  define M_PI_4 0.7853981633974483
#endif
#ifndef M_E
#  define M_E 2.718281828459045091
#endif

// Non-standard function library
// pi/180, used for converting between degrees and radians
// 4/3 pi for computing sphere volumes
// square and cube for computing squares and cubes
#ifndef M_PI_180
#  define M_PI_180 0.017453292519943295
#endif
#ifndef M_4PI_3
#  define M_4PI_3 4.18879020478639
#endif
//inline double square(double x) { return pow(x,2.0); }
//inline double square(double x) { return pown(x,2); }
inline double square(double x) { return x*x; }
inline double cube(double x) { return x*x*x; }
inline double sas_sinx_x(double x) { return x==0 ? 1.0 : sin(x)/x; }

%(DEFINES)s
%(SOURCES)s

/*
    ##########################################################
    #                                                        #
    #   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!   #
    #   !!                                              !!   #
    #   !!  KEEP THIS CODE CONSISTENT WITH KERNELPY.PY  !!   #
    #   !!                                              !!   #
    #   !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!   #
    #                                                        #
    ##########################################################
*/

// 1-D kernel: evaluates I(q) on a vector of q values, optionally averaging
// over polydispersity loops emitted by the generator (IQ_OPEN_LOOPS).
#ifdef IQ_KERNEL_NAME
kernel void IQ_KERNEL_NAME(
    global const double *q,
    global double *result,
    const int Nq,
#ifdef IQ_OPEN_LOOPS
  #ifdef USE_OPENCL
    global double *loops_g,
  #endif
    local double *loops,
    const double cutoff,
    IQ_DISPERSION_LENGTH_DECLARATIONS,
#endif
    IQ_FIXED_PARAMETER_DECLARATIONS
    )
{
#ifdef USE_OPENCL
  #ifdef IQ_OPEN_LOOPS
  // copy loops info to local memory
  event_t e = async_work_group_copy(loops, loops_g, (IQ_DISPERSION_LENGTH_SUM)*2, 0);
  wait_group_events(1, &e);
  #endif

  // on OpenCL, one work item per q point; on C, an OpenMP loop over q
  int i = get_global_id(0);
  if (i < Nq)
#else
  #pragma omp parallel for
  for (int i=0; i < Nq; i++)
#endif
  {
    const double qi = q[i];
#ifdef IQ_OPEN_LOOPS
    double ret=0.0, norm=0.0;
    IQ_OPEN_LOOPS
    //for (int radius_i=0; radius_i < Nradius; radius_i++) {
    //  const double radius = loops[2*(radius_i)];
    //  const double radius_w = loops[2*(radius_i)+1];

    const double weight = IQ_WEIGHT_PRODUCT;
    if (weight > cutoff) {
      const double scattering = Iq(qi, IQ_PARAMETERS);
      // allow kernels to exclude invalid regions by returning NaN
      if (!isnan(scattering)) {
        ret += weight*scattering;
  #ifdef VOLUME_PARAMETERS
        norm += weight * form_volume(VOLUME_PARAMETERS);
  #else
        norm += weight;
  #endif
      }
    //else { printf("exclude qx,qy,I:%%g,%%g,%%g\n",qi,scattering); }
    }
    IQ_CLOSE_LOOPS
    // norm can only be zero if volume is zero, so no scattering
    result[i] = (norm > 0. ? scale*ret/norm + background : background);
#else
    result[i] = scale*Iq(qi, IQ_PARAMETERS) + background;
#endif
  }
}
#endif

// 2-D kernel: evaluates I(qx,qy), with optional Kahan-compensated
// accumulation and an orientation correction for models with a theta angle.
#ifdef IQXY_KERNEL_NAME
kernel void IQXY_KERNEL_NAME(
    global const double *qx,
    global const double *qy,
    global double *result,
    const int Nq,
#ifdef IQXY_OPEN_LOOPS
  #ifdef USE_OPENCL
    global double *loops_g,
  #endif
    local double *loops,
    const double cutoff,
    IQXY_DISPERSION_LENGTH_DECLARATIONS,
#endif
    IQXY_FIXED_PARAMETER_DECLARATIONS
    )
{
#ifdef USE_OPENCL
  #ifdef IQXY_OPEN_LOOPS
  // copy loops info to local memory
  event_t e = async_work_group_copy(loops, loops_g, (IQXY_DISPERSION_LENGTH_SUM)*2, 0);
  wait_group_events(1, &e);
  #endif

  int i = get_global_id(0);
  if (i < Nq)
#else
  #pragma omp parallel for
  for (int i=0; i < Nq; i++)
#endif
  {
    const double qxi = qx[i];
    const double qyi = qy[i];
    #if USE_KAHAN_SUMMATION
    double accumulated_error = 0.0;
    #endif
#ifdef IQXY_OPEN_LOOPS
    double ret=0.0, norm=0.0;
    IQXY_OPEN_LOOPS
    //for (int radius_i=0; radius_i < Nradius; radius_i++) {
    //  const double radius = loops[2*(radius_i)];
    //  const double radius_w = loops[2*(radius_i)+1];

    double weight = IQXY_WEIGHT_PRODUCT;
    if (weight > cutoff) {

      const double scattering = Iqxy(qxi, qyi, IQXY_PARAMETERS);
      if (!isnan(scattering)) { // if scattering is bad, exclude it from sum
      #if defined(IQXY_HAS_THETA)
        // Force a nominal value for the spherical correction even when
        // theta is +0/180 so that there are no divide by zero problems.
        // For sin(theta) fixed at 0 and 180, we effectively multiply top and bottom
        // by 1e-6, so the effect cancels.
        const double spherical_correction = fmax(fabs(cos(M_PI_180*theta)), 1.e-6);
        weight *= spherical_correction;
      #endif
        const double next = weight * scattering;
      #if USE_KAHAN_SUMMATION
        // Kahan compensated summation to limit round-off in long loops
        const double y = next - accumulated_error;
        const double t = ret + y;
        accumulated_error = (t - ret) - y;
        ret = t;
      #else
        ret += next;
      #endif
      #ifdef VOLUME_PARAMETERS
        norm += weight*form_volume(VOLUME_PARAMETERS);
      #else
        norm += weight;
      #endif
      }
      //else { printf("exclude qx,qy,I:%%g,%%g,%%g\n",qi,scattering); }
    }
    IQXY_CLOSE_LOOPS
    // norm can only be zero if volume is zero, so no scattering
    result[i] = (norm>0. ? scale*ret/norm + background : background);
#else
    result[i] = scale*Iqxy(qxi, qyi, IQXY_PARAMETERS) + background;
#endif
  }
}
#endif
matvec.c
#include <stdio.h> #include <math.h> #include <float.h> #include <stdlib.h> #include <omp.h> #include "ctimer.h" main(int argc, char**argv) { ////// PRODUCTO MATRIZ-VECTOR x=A*b ////// // DECLARACION DE VARIABLES // // DECLARACION DE VARIABLES // double t1,t2,tucpu,tscpu; const int TALLA= 4; const long int M= 6400; const long int N= 3200; int i; int j; double *A; double *b; double *x; double *z; A=malloc(M*N*sizeof(double)); b=malloc(M*sizeof(double)); x=malloc(M*sizeof(double)); z=malloc(M*sizeof(double)); double suma; srand(time(0)); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("Programa que calcula el Producto Matriz-Vector. \n"); printf("------- \n"); // GENERACION DE DATOS // // for (i=0;i<M;i++){ for (j=0;j<N;j++) A[i+j*M]=rand() % TALLA; b[i]=rand() % TALLA; } // PRODUCTO MATRIZ-VECTOR SECUENCIAL // // // printf("Voy a empezar el Producto Matriz-Vector secuencial. \n"); printf(" ------- \n"); double alfa; ctimer(&t1,&tucpu,&tscpu); for(i=0;i<M;i++){ alfa=0.0; for (j=0;j<N;j++) alfa=alfa+A[i+j*M]*b[j]; z[i]=alfa; } ctimer(&t2,&tucpu,&tscpu); printf("Producto Producto MatxVec MatxVec secuencial secuencial z(5) %f \n",z[5]); printf(" ------- \n"); printf("Tiempo %f segundos \n",(float) (t2-t1)); printf(" ------- \n"); // PRODUCTO MATRIZ // PRODUCTO MATRIZ-VECTOR PARALELO / VECTOR PARALELO // printf("Empiezo el producto Matriz-Vector paralelo\n"); printf(" ------- \n"); ctimer(&t1,&tucpu,&tscpu); int tb; int tid; tb=M/TALLA; omp_set_num_threads(TALLA); #pragma omp parallel private(tid,i,j,alfa) { tid = omp_get_thread_num(); for (i=0;i<tb;i++){ alfa=0; for (j=0;j<N;j++) alfa=alfa+A[i+tb*tid+j*M]*b[j]; x[i+tb*tid]=alfa; } } ctimer(&t2,&tucpu,&tscpu); // SALIDA DE RESULTADOS // printf("He terminado el Producto MatxVec paralelo \n"); printf(" ------- \n"); printf("Producto MatxVec paralelo x(5)= %f \n",x[5]); printf(" ------- \n"); // Fin del calculo del Producto Matriz-Vector paralelo printf("Tiempo %f segundos 
\n",(float) (t2-t1)); printf(" ------- \n"); printf(" Comprobación \n"); suma=0.0; for (i=1;i<M;i++) suma=suma + (x[i]-z[i])*(x[i]-z[i]); suma=sqrt(suma); printf("norma dif SEQ-PAR=%f\n",suma); printf(" ------- \n"); printf("He acabado. \n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); }
GB_binop__minus_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates every kernel that applies the binary
// MINUS operator, z = x - y, on int32 matrices.  Each function body comes
// from a shared template (the #include'd *_template.c files), specialized by
// the macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int32)
// A*D function (colscale):         GB (_AxD__minus_int32)
// D*A function (rowscale):         GB (_DxB__minus_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int32)
// C=scalar+B                       GB (_bind1st__minus_int32)
// C=scalar+B'                      GB (_bind1st_tran__minus_int32)
// C=A+scalar                       GB (_bind2nd__minus_int32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int32)

// C type:     int32_t
// A type:     int32_t
// A pattern?  0
// B type:     int32_t
// B pattern?  0

// BinaryOp:   cij = (aij - bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable generated code; the template block above
    // always returns first.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, alpha/beta stand in for entries missing from A or B
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries not in the bitmap
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries not in the bitmap
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__log10_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This translation unit instantiates C = log10(A) elementwise for
// single-precision complex matrices (GxB_FC32_t).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__log10_fc32_fc32)
// op(A') function: GB (_unop_tran__log10_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_clog10f (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex base-10 logarithm (see GB_clog10f in GB.h)
#define GB_OP(z, x) \
    z = GB_clog10f (x) ;

// casting (no typecast needed: A and C are the same complex type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = GB_clog10f (z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log10_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog10f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog10f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body comes from GB_unop_transpose.c, driven by the macros above.

GrB_Info GB (_unop_tran__log10_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
stepper.c
#include "stepper.h"
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

// Side length (in cells) of the square blocks used by the blocked time
// stepper; the same size is used in x and y.
#ifndef BLOCK_SIZE
#define BLOCK_SIZE ((int) 100)
#endif

//ldoc on
/**
 * ## Implementation
 *
 * ### Structure allocation
 */

// Allocate and initialize a simulation over a w-by-h domain discretized
// into nx-by-ny cells, with nfield solution components per cell.
// Storage for u, v, f, g and six rows of scratch lives in one slab
// owned by sim->u; central2d_free releases it.
central2d_t* central2d_init(float w, float h, int nx, int ny,
                            int nfield, flux_t flux, speed_t speed,
                            float cfl)
{
    // We extend to a four cell buffer to avoid BC comm on odd time steps
    int ng = 4;

    central2d_t* sim = (central2d_t*) malloc(sizeof(central2d_t));
    sim->nx = nx;
    sim->ny = ny;
    sim->ng = ng;
    sim->nfield = nfield;
    sim->dx = w/nx;
    sim->dy = h/ny;
    sim->flux = flux;
    sim->speed = speed;
    sim->cfl = cfl;

    // NOTE(review): malloc results are not checked here or below;
    // an allocation failure will crash on first use.
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    int nc = nx_all * ny_all;     // cells per field, ghosts included
    int N  = nfield * nc;         // floats per full solution array
    sim->u  = (float*) malloc((4*N + 6*nx_all)* sizeof(float));
    sim->v  = sim->u +   N;
    sim->f  = sim->u + 2*N;
    sim->g  = sim->u + 3*N;
    sim->scratch = sim->u + 4*N;

    return sim;
}

// Release a simulation created by central2d_init (one slab + the struct).
void central2d_free(central2d_t* sim)
{
    free(sim->u);
    free(sim);
}

// Linear offset of field k, interior cell (ix,iy), in a ghosted array.
// Row-major with row length nx + 2*ng; (0,0) is the first interior cell.
int central2d_offset(central2d_t* sim, int k, int ix, int iy)
{
    int nx = sim->nx, ny = sim->ny, ng = sim->ng;
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    return (k*ny_all+(ng+iy))*nx_all+(ng+ix);
}

/**
 * ### Boundary conditions
 *
 * In finite volume methods, boundary conditions are typically applied by
 * setting appropriate values in ghost cells.  For our framework, we will
 * apply periodic boundary conditions; that is, waves that exit one side
 * of the domain will enter from the other side.
 *
 * We apply the conditions by assuming that the cells with coordinates
 * `nghost <= ix <= nx+nghost` and `nghost <= iy <= ny+nghost` are
 * "canonical", and setting the values for all other cells `(ix,iy)`
 * to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
 * integers `p` and `q`.
 */

// Copy an nx-by-ny subgrid between two locations in arrays that share
// the same row stride.
static inline
void copy_subgrid(float* restrict dst,
                  const float* restrict src,
                  int nx, int ny, int stride)
{
    for (int iy = 0; iy < ny; ++iy)
        for (int ix = 0; ix < nx; ++ix)
            dst[iy*stride+ix] = src[iy*stride+ix];
}

// Fill the ng-deep ghost frame of each field with periodically wrapped
// interior data (left/right first, then full-width top/bottom).
void central2d_periodic(float* restrict u,
                        int nx, int ny, int ng, int nfield)
{
    // Stride and number per field
    int s = nx + 2*ng;
    int field_stride = (ny+2*ng)*s;

    // Offsets of left, right, top, and bottom data blocks and ghost blocks
    int l = nx,   lg = 0;
    int r = ng,   rg = nx+ng;
    int b = ny*s, bg = 0;
    int t = ng*s, tg = (nx+ng)*s;

    // Copy data into ghost cells on each side
    for (int k = 0; k < nfield; ++k) {
        float* uk = u + k*field_stride;
        copy_subgrid(uk+lg, uk+l, ng, ny+2*ng, s);
        copy_subgrid(uk+rg, uk+r, ng, ny+2*ng, s);
        copy_subgrid(uk+tg, uk+t, nx+2*ng, ng, s);
        copy_subgrid(uk+bg, uk+b, nx+2*ng, ng, s);
    }
}

/**
 * ### Derivatives with limiters
 *
 * In order to advance the time step, we also need to estimate
 * derivatives of the fluxes and the solution values at each cell.
 * In order to maintain stability, we apply a limiter here.
 *
 * The minmod limiter *looks* like it should be expensive to compute,
 * since superficially it seems to require a number of branches.
 * We do something a little tricky, getting rid of the condition
 * on the sign of the arguments using the `copysign` instruction.
 * If the compiler does the "right" thing with `max` and `min`
 * for floating point arguments (translating them to branch-free
 * intrinsic operations), this implementation should be relatively fast.
 */

// Branch-free computation of minmod of two numbers times 2s
static inline
float xmin2s(float s, float a, float b) {
    float sa = copysignf(s, a);
    float sb = copysignf(s, b);
    float abs_a = fabsf(a);
    float abs_b = fabsf(b);
    float min_abs = (abs_a < abs_b ? abs_a : abs_b);
    return (sa+sb) * min_abs;
}

// Limited combined slope estimate
static inline
float limdiff(float um, float u0, float up) {
    const float theta = 2.0;
    const float quarter = 0.25;
    float du1 = u0-um;   // Difference to left
    float du2 = up-u0;   // Difference to right
    float duc = up-um;   // Twice centered difference
    return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}

// Compute limited derivs along a contiguous row
// (reads one cell beyond each end: u[-1] and u[ncell]).
static inline
void limited_deriv1(float* restrict du,
                    const float* restrict u,
                    int ncell)
{
    for (int i = 0; i < ncell; ++i)
        du[i] = limdiff(u[i-1], u[i], u[i+1]);
}

// Compute limited derivs across stride (i.e. in the y direction).
static inline
void limited_derivk(float* restrict du,
                    const float* restrict u,
                    int ncell, int stride)
{
    assert(stride > 0);
    for (int i = 0; i < ncell; ++i)
        du[i] = limdiff(u[i-stride], u[i], u[i+stride]);
}

/**
 * ### Advancing a time step
 *
 * Take one step of the numerical scheme.  This consists of two pieces:
 * a first-order corrector computed at a half time step, which is used
 * to obtain new $F$ and $G$ values; and a corrector step that computes
 * the solution at the full step.  For full details, we refer to the
 * [Jiang and Tadmor paper][jt].
 *
 * The `compute_step` function takes two arguments: the `io` flag
 * which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
 * flag, which actually determines the time step length.  We need
 * to know the even-vs-odd distinction because the Jiang-Tadmor
 * scheme alternates between a primary grid (on even steps) and a
 * staggered grid (on odd steps).  This means that the data at $(i,j)$
 * in an even step and the data at $(i,j)$ in an odd step represent
 * values at different locations in space, offset by half a space step
 * in each direction.  Every other step, we shift things back by one
 * mesh cell in each direction, essentially resetting to the primary
 * indexing scheme.
 *
 * We're slightly tricky in the corrector in that we write
 * $$
 *   v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
 * $$
 * where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
 * update formula, and $d(i,j)$ the $y$-derivative terms.  This cuts
 * the arithmetic cost a little (not that it's that big to start).
 * It also makes it more obvious that we only need four rows worth
 * of scratch space.
 */

// Predictor half-step: v = u - dt/2 * (limited) flux derivatives.
static
void central2d_predict(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int nx, int ny, int nfield)
{
    float* restrict fx = scratch;
    float* restrict gy = scratch+nx;
    for (int k = 0; k < nfield; ++k) {
        for (int iy = 1; iy < ny-1; ++iy) {
            int offset = (k*ny+iy)*nx+1;
            limited_deriv1(fx+1, f+offset, nx-2);
            limited_derivk(gy+1, g+offset, nx-2, nx);
            for (int ix = 1; ix < nx-1; ++ix) {
                int offset = (k*ny+iy)*nx+ix;
                v[offset] = u[offset] - dtcdx2 * fx[ix] - dtcdy2 * gy[ix];
            }
        }
    }
}

// Corrector: per-row s (u and x-derivative terms) and d (y-derivative
// terms) partial sums for one row of the staggered update.
static
void central2d_correct_sd(float* restrict s,
                          float* restrict d,
                          const float* restrict ux,
                          const float* restrict uy,
                          const float* restrict u,
                          const float* restrict f,
                          const float* restrict g,
                          float dtcdx2, float dtcdy2,
                          int xlo, int xhi)
{
    for (int ix = xlo; ix < xhi; ++ix)
        s[ix] =
            0.2500f * (u [ix] + u [ix+1]) +
            0.0625f * (ux[ix] - ux[ix+1]) +
            dtcdx2  * (f [ix] - f [ix+1]);
    for (int ix = xlo; ix < xhi; ++ix)
        d[ix] =
            0.0625f * (uy[ix] + uy[ix+1]) +
            dtcdy2  * (g [ix] + g [ix+1]);
}

// Corrector: combine the s/d rows of adjacent grid lines to finish the
// staggered-grid update into v on [xlo,xhi) x [ylo,yhi).
static
void central2d_correct(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int xlo, int xhi, int ylo, int yhi,
                       int nx, int ny, int nfield)
{
    assert(0 <= xlo && xlo < xhi && xhi <= nx);
    assert(0 <= ylo && ylo < yhi && yhi <= ny);

    float* restrict ux = scratch;
    float* restrict uy = scratch +   nx;
    float* restrict s0 = scratch + 2*nx;
    float* restrict d0 = scratch + 3*nx;
    float* restrict s1 = scratch + 4*nx;
    float* restrict d1 = scratch + 5*nx;

    for (int k = 0; k < nfield; ++k) {

        float*       restrict vk = v + k*ny*nx;
        const float* restrict uk = u + k*ny*nx;
        const float* restrict fk = f + k*ny*nx;
        const float* restrict gk = g + k*ny*nx;

        limited_deriv1(ux+1, uk+ylo*nx+1, nx-2);
        limited_derivk(uy+1, uk+ylo*nx+1, nx-2, nx);
        central2d_correct_sd(s1, d1, ux, uy,
                             uk + ylo*nx, fk + ylo*nx, gk + ylo*nx,
                             dtcdx2, dtcdy2, xlo, xhi);

        for (int iy = ylo; iy < yhi; ++iy) {

            // Rotate the double-buffered row sums: row iy's values
            // become the "previous" row, then compute row iy+1.
            float* tmp;
            tmp = s0; s0 = s1; s1 = tmp;
            tmp = d0; d0 = d1; d1 = tmp;

            limited_deriv1(ux+1, uk+(iy+1)*nx+1, nx-2);
            limited_derivk(uy+1, uk+(iy+1)*nx+1, nx-2, nx);
            central2d_correct_sd(s1, d1, ux, uy,
                                 uk + (iy+1)*nx, fk + (iy+1)*nx, gk + (iy+1)*nx,
                                 dtcdx2, dtcdy2, xlo, xhi);

            for (int ix = xlo; ix < xhi; ++ix)
                vk[iy*nx+ix] = (s1[ix]+s0[ix])-(d1[ix]-d0[ix]);
        }
    }
}

// One Jiang-Tadmor step (predictor + flux re-evaluation + corrector);
// io selects primary (0) vs staggered (1) output alignment.
static
void central2d_step(float* restrict u, float* restrict v,
                    float* restrict scratch,
                    float* restrict f,
                    float* restrict g,
                    int io, int nx, int ny, int ng,
                    int nfield, flux_t flux, speed_t speed,
                    float dt, float dx, float dy)
{
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;

    float dtcdx2 = 0.5 * dt / dx;
    float dtcdy2 = 0.5 * dt / dy;

    flux(f, g, u, nx_all * ny_all, nx_all * ny_all);

    central2d_predict(v, scratch, u, f, g, dtcdx2, dtcdy2,
                      nx_all, ny_all, nfield);

    // Flux values of f and g at half step
    for (int iy = 1; iy < ny_all-1; ++iy) {
        int jj = iy*nx_all+1;
        flux(f+jj, g+jj, v+jj, nx_all-2, nx_all * ny_all);
    }

    central2d_correct(v+io*(nx_all+1), scratch, u, f, g, dtcdx2, dtcdy2,
                      ng-io, nx+ng-io,
                      ng-io, ny+ng-io,
                      nx_all, ny_all, nfield);
}

/**
 * ### Advance a fixed time
 *
 * The `run` method advances from time 0 (initial conditions) to time
 * `tfinal`.  Note that `run` can be called repeatedly; for example,
 * we might want to advance for a period of time, write out a picture,
 * advance more, and write another picture.  In this sense, `tfinal`
 * should be interpreted as an offset from the time represented by
 * the simulator at the start of the call, rather than as an absolute time.
 *
 * We always take an even number of steps so that the solution
 * at the end lives on the main grid instead of the staggered grid.
 */

// Copy a bh-by-bw window of each field from the global array u (row
// stride `stride`, per-field stride `field_stride`) into the block
// array bu (row stride `bstride`, per-field stride `bfield_stride`).
void do_copy_in(float* restrict u, float* restrict bu,
                int stride, int bstride, int bh, int bw,
                int nfield, int field_stride, int bfield_stride)
{
    for (int k = 0; k < nfield; ++k){
        float* uk  = u  + k*field_stride;
        float* buk = bu + k*bfield_stride;
        for (int iy = 0; iy < bh; ++iy){
            for (int ix = 0; ix < bw; ++ix){
                buk[iy*bstride+ix] = uk[iy*stride+ix];
            }
        }
    }
}

// Inverse of do_copy_in: copy a bh-by-bw window of each field from the
// block array bu back into the global array u.
void do_copy_out(float* restrict u, float* restrict bu,
                 int stride, int bstride, int bh, int bw,
                 int nfield, int field_stride, int bfield_stride)
{
    for (int k = 0; k < nfield; ++k){
        float* uk  = u  + k*field_stride;
        float* buk = bu + k*bfield_stride;
        for (int iy = 0; iy < bh; ++iy){
            for (int ix = 0; ix < bw; ++ix){
                uk[iy*stride+ix] = buk[iy*bstride+ix];
            }
        }
    }
}

// Advance the ghosted solution u from t=0 to tfinal using the blocked,
// OpenMP-parallel Jiang-Tadmor stepper.  Returns the number of steps
// taken (always even).  v/scratch/f/g are workspace of the same layout.
static
int central2d_xrun(float* restrict u, float* restrict v,
                   float* restrict scratch,
                   float* restrict f,
                   float* restrict g,
                   int nx, int ny, int ng,
                   int nfield, flux_t flux, speed_t speed,
                   float tfinal, float dx, float dy, float cfl)
{
    // NOTE(review): the block count M is derived from nx but reused for
    // the y direction too; the decomposition assumes nx == ny.
    const int M = (nx%BLOCK_SIZE ? nx/BLOCK_SIZE + 1 : nx/BLOCK_SIZE);
    printf("M: %d\n", M);    // debug: report block count

    int nstep = 0;
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;
    int nc = nx_all * ny_all;   // cells per field, ghosts included
    int N  = nc * nfield;       // floats per full solution array
    bool done = false;
    float t = 0;
    while (!done) {
        // CFL-limited step size from the current max wave speeds
        float cxy[2] = {1.0e-15f, 1.0e-15f};
        central2d_periodic(u, nx, ny, ng, nfield);
        speed(cxy, u, nx_all * ny_all, nx_all * ny_all);
        float dt = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
        if (t + 2*dt >= tfinal) {
            dt = (tfinal-t)/2;
            done = true;
        }

        // Staging array for the updated interior; each block writes a
        // disjoint window, so the parallel loop below is race-free.
        // (BUGFIX: was over-allocated at 4*N + 6*nx_all floats; only the
        // N floats of one solution array are ever used.)
        float* uk = (float*) malloc(N * sizeof(float));

        // Each block takes two time steps independently, using its own
        // ghost halo, so no inter-block communication is needed mid-pair.
        #pragma omp parallel for
        for (int bj = 0; bj < M; ++bj){
            const int j = bj * BLOCK_SIZE;
            int bny = (j + BLOCK_SIZE > ny ? ny-j : BLOCK_SIZE);
            int bny_all = bny + 2*ng;
            for (int bi = 0; bi < M; ++bi){
                const int i = bi * BLOCK_SIZE;
                int bnx = (i + BLOCK_SIZE > nx ? nx-i : BLOCK_SIZE);
                int bnx_all = bnx + 2*ng;

                // Per-block storage (square, sized by the larger side)
                int bnxy = bnx_all > bny_all ? bnx_all : bny_all;
                int bnc = bnxy * bnxy;
                int bN  = nfield * bnc;
                float* bu = (float*) malloc((4*bN + 6*bnxy)* sizeof(float));
                float* bv = bu + bN;
                float* bf = bu + 2*bN;
                float* bg = bu + 3*bN;
                float* bscratch = bu + 4*bN;

                // Offset when the segment is not square: shift the copy
                // window so the padded square still lands inside u.
                // NOTE(review): assumes the shift (BLOCK_SIZE - bny or
                // - bnx) never pushes the window before row/column 0.
                int jj = bny < bnx ? BLOCK_SIZE - bny : 0;
                int ii = bnx < bny ? BLOCK_SIZE - bnx : 0;

                // Copy u into the blocking u, w/ ghost cells of blocking u.
                // BUGFIX: the row stride of u is nx_all (row length),
                // not ny_all; the original only worked when nx == ny.
                do_copy_in(u + (j-jj)*nx_all + (i-ii), bu, nx_all, bnxy,
                           bnxy, bnxy, nfield, nc, bnc);

                // Two time steps: the first consumes the outer two ghost
                // layers (hence the +4 / ng-2), the second the inner ones.
                central2d_step(bu, bv, bscratch, bf, bg,
                               0, bnxy-2*ng+4, bnxy-2*ng+4, ng-2,
                               nfield, flux, speed,
                               dt, dx, dy);
                central2d_step(bv, bu, bscratch, bf, bg,
                               1, bnxy-2*ng, bnxy-2*ng, ng,
                               nfield, flux, speed,
                               dt, dx, dy);

                // Copy the block interior (no block ghosts) into uk
                do_copy_out(uk + (ng+j)*nx_all + ng + i,
                            bu + (ng+jj)*bnxy + ng + ii,
                            nx_all, bnxy, bny, bnx, nfield, nc, bnc);
                free(bu);
            }
        }

        // Publish the assembled interior back into u.
        // BUGFIX: both strides must be nx_all (row length), not ny_all.
        do_copy_out(u + ng*nx_all + ng, uk + ng*nx_all + ng,
                    nx_all, nx_all, ny, nx, nfield, nc, nc);
        free(uk);

        t += 2*dt;
        nstep += 2;
    }
    return nstep;
}

// Public entry point: unpack the simulation struct and run to tfinal.
int central2d_run(central2d_t* sim, float tfinal)
{
    return central2d_xrun(sim->u, sim->v, sim->scratch,
                          sim->f, sim->g,
                          sim->nx, sim->ny, sim->ng,
                          sim->nfield, sim->flux, sim->speed,
                          tfinal, sim->dx, sim->dy, sim->cfl);
}
/* statistic.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the EvaluateImageChannel method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImageChannel(Image *image, % const ChannelType channel,const MagickEvaluateOperator op, % const double value,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels) { register ssize_t i; assert(pixels != (MagickPixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (MagickPixelPacket *) NULL) pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static MagickPixelPacket **AcquirePixelThreadSet(const Image *image, const size_t number_images) { register ssize_t i, j; MagickPixelPacket **pixels; size_t length, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (MagickPixelPacket **) NULL) return((MagickPixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { length=image->columns; if (length < number_images) length=number_images; pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length, sizeof(**pixels)); if (pixels[i] == (MagickPixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) length; j++) GetMagickPixelPacket(image,&pixels[i][j]); } return(pixels); } static inline double 
EvaluateMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const MagickPixelPacket *color_1, *color_2; int intensity; color_1=(const MagickPixelPacket *) x; color_2=(const MagickPixelPacket *) y; intensity=(int) MagickPixelIntensity(color_2)- (int) MagickPixelIntensity(color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info, const Quantum pixel,const MagickEvaluateOperator op, const MagickRealType value) { MagickRealType result; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(MagickRealType) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() which returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). */ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 
1.0 : value); break; } case ExponentialEvaluateOperator: { result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale* pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, ImpulseNoise,value); break; } case LaplacianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { if ((QuantumScale*pixel) >= MagickEpsilon) result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value* pixel+1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(MagickRealType) EvaluateMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MedianEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MinEvaluateOperator: { result=(MagickRealType) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(MagickRealType) (value*pixel); break; } case OrEvaluateOperator: { result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, PoissonNoise,value); break; } case PowEvaluateOperator: { result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel), (double) value)); break; } case RightShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5)); break; } case SetEvaluateOperator: { result=value; break; } case 
SineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(MagickRealType) (pixel-value); break; } case SumEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case ThresholdEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, UniformNoise,value); break; } case XorEvaluateOperator: { result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { MagickBooleanType status; status=EvaluateImageChannel(image,CompositeChannels,op,value,exception); return(status); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; Image *image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **restrict evaluate_pixels, zero; RandomInfo **restrict random_info; size_t number_images; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=CloneImage(images,images->columns,images->rows,MagickTrue,exception); if (image == (Image 
*) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images,number_images); if (evaluate_pixels == (MagickPixelPacket **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. */ status=MagickTrue; progress=0; GetMagickPixelPacket(images,&zero); random_info=AcquireRandomInfoThreadSet(); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #endif evaluate_view=AcquireAuthenticCacheView(image,exception); if (op == MedianEvaluateOperator) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y, image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) number_images; i++) evaluate_pixel[i]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const PixelPacket *) NULL) { 
image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),op,evaluate_pixel[i].red); evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),op,evaluate_pixel[i].green); evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),op,evaluate_pixel[i].blue); evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id], GetPixelOpacity(p),op,evaluate_pixel[i].opacity); if (image->colorspace == CMYKColorspace) evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id], *indexes,op,evaluate_pixel[i].index); image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue)); if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[i/2].opacity)); else SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+i,ClampToQuantum( evaluate_pixel[i/2].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } else { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,images,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = 
GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register ssize_t i, x; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y, image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) evaluate_pixel[x]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) next->columns; x++) { evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].red); evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].green); evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].blue); evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id], GetPixelOpacity(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].opacity); if (image->colorspace == CMYKColorspace) evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id], GetPixelIndex(indexes+x),i == 0 ? 
AddEvaluateOperator : op, evaluate_pixel[x].index); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (op == MeanEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { evaluate_pixel[x].red/=number_images; evaluate_pixel[x].green/=number_images; evaluate_pixel[x].blue/=number_images; evaluate_pixel[x].opacity/=number_images; evaluate_pixel[x].index/=number_images; } if (op == MultiplyEvaluateOperator) for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) (number_images-1); j++) { evaluate_pixel[x].red*=(MagickRealType) QuantumScale; evaluate_pixel[x].green*=(MagickRealType) QuantumScale; evaluate_pixel[x].blue*=(MagickRealType) QuantumScale; evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale; evaluate_pixel[x].index*=(MagickRealType) QuantumScale; } } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue)); if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[x].opacity)); else SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+x,ClampToQuantum( evaluate_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) image=DestroyImage(image); return(image); } MagickExport 
/*
  EvaluateImageChannel() applies an arithmetic, relational or logical
  operator (with the given constant value) to every selected channel of
  every pixel in the image, in place.

    image:     the image (promoted to DirectClass).
    channel:   bitmask of channels to operate on.
    op:        the evaluate operator to apply.
    value:     the operator's constant argument.
    exception: receives any errors or warnings.

  Returns MagickTrue on success.  Rows are processed in parallel when
  OpenMP is available; each thread uses its own RandomInfo so random
  operators stay thread-safe.
*/
MagickBooleanType EvaluateImageChannel(Image *image,const ChannelType channel,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  /*
    One RandomInfo per thread; random-based operators must not share state.
  */
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
#endif
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): `key == ~0UL` presumably disables threading when the
    random generator is unseeded/reproducible -- confirm against
    GetRandomSecretKey() semantics.
  */
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
          GetPixelRed(q),op,value)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
          GetPixelGreen(q),op,value)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(ApplyEvaluateOperator(random_info[id],
          GetPixelBlue(q),op,value)));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Matte-less images store plain opacity; matte images are
            operated on as alpha (inverted opacity).
          */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(ApplyEvaluateOperator(
              random_info[id],GetPixelOpacity(q),op,value)));
          else
            SetPixelAlpha(q,ClampToQuantum(ApplyEvaluateOperator(
              random_info[id],(Quantum) GetPixelAlpha(q),op,value)));
        }
      if (((channel & IndexChannel) != 0) &&
          (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ClampToQuantum(ApplyEvaluateOperator(
          random_info[id],GetPixelIndex(indexes+x),op,value)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F u n c t i o n I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FunctionImage() applies a value to the image with an arithmetic,
%  relational, or logical operator to an image.  Use these operations to
%  lighten or darken an image, to increase or decrease contrast in an image,
%  or to produce the "negative" of an image.
%
%  The format of the FunctionImageChannel method is:
%
%      MagickBooleanType FunctionImage(Image *image,
%        const MagickFunction function,const ssize_t number_parameters,
%        const double *parameters,ExceptionInfo *exception)
%      MagickBooleanType FunctionImageChannel(Image *image,
%        const ChannelType channel,const MagickFunction function,
%        const ssize_t number_parameters,const double *argument,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o function: A channel function.
%
%    o parameters: one or more parameters.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ApplyFunction() evaluates one channel function on a single quantum value
  and returns the clamped result.  The `parameters` array is interpreted
  per-function; missing trailing parameters fall back to documented
  defaults.  `exception` is accepted for interface symmetry but unused.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    result;

  register ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial.
        Parameters:  polynomial constants, highest to lowest order.
        For example: c0*x^3 + c1*x^2 + c2*x + c3  (Horner evaluation).
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel + parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid function.
        Parameters:  frequency, phase, amplitude, bias.
      */
      double freq,phase,ampl,bias;
      freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
      phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0;
      ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5;
      bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
      /* phase is given in degrees, hence the /360.0 */
      result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI*
        (freq*QuantumScale*pixel + phase/360.0) )) + bias ) );
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin function (pegged at range limits for invalid results).
        Parameters:  width, center, range, bias.
      */
      double width,range,center,bias;
      width = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
      center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
      range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
      bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
      result = 2.0/width*(QuantumScale*pixel - center);
      /* clamp asin's domain to [-1,1]; out-of-domain inputs peg to limits */
      if ( result <= -1.0 )
        result = bias - range/2.0;
      else if ( result >= 1.0 )
        result = bias + range/2.0;
      else
        result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
      result *= QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan function.
        Parameters:  slope, center, range, bias.
      */
      double slope,range,center,bias;
      slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0;
      center = ( number_parameters >= 2 ) ? parameters[1] : 0.5;
      range = ( number_parameters >= 3 ) ? parameters[2] : 1.0;
      bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5;
      result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        result) + bias ) );
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}

/*
  FunctionImage() applies `function` to all channels; convenience wrapper
  around FunctionImageChannel() with CompositeChannels.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception);
  return(status);
}

/*
  FunctionImageChannel() applies `function` (see ApplyFunction) with the
  given parameters to the selected channels of every pixel, in place.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /* matte-less: raw opacity; matte: operate on alpha instead */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) &&
          (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FunctionImageChannel)
#endif
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e C h a n n e l E x t r e m a                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelExtrema() returns the extrema of one or more image channels.
%
%  The format of the GetImageChannelExtrema method is:
%
%      MagickBooleanType GetImageChannelExtrema(const Image *image,
%        const ChannelType channel,size_t *minima,size_t *maxima,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o minima: the minimum value in the channel.
%
%    o maxima: the maximum value in the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetImageExtrema() returns the extrema across all channels; convenience
  wrapper around GetImageChannelExtrema() with CompositeChannels.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,exception));
}

/*
  GetImageChannelExtrema() rounds the double-precision channel range from
  GetImageChannelRange() to integral extrema.  Returns the underlying
  range-computation status.
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    max,
    min;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&min,&max,exception);
  /*
    NOTE(review): if GetImageChannelRange() fails without touching min/max
    (or yields a negative min), the casts to size_t below operate on
    sentinel/negative doubles -- confirm callers check `status` first.
  */
  *minima=(size_t) ceil(min-0.5);
  *maxima=(size_t) floor(max+0.5);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l M e a n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelMean() returns the mean and standard deviation of one or more
%  image channels.
%
%  The format of the GetImageChannelMean method is:
%
%      MagickBooleanType GetImageChannelMean(const Image *image,
%        const ChannelType channel,double *mean,double *standard_deviation,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o mean: the average value in the channel.
%
%    o standard_deviation: the standard deviation of the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetImageMean() returns the mean/standard deviation over all channels;
  convenience wrapper around GetImageChannelMean() with CompositeChannels.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception);
  return(status);
}

/*
  GetImageChannelMean() combines per-channel statistics into a single
  mean and standard deviation over the channels selected by `channel`.

    image:              the image.
    channel:            bitmask of channels to include (OpacityChannel is
                        only counted when the image has a matte channel;
                        IndexChannel only for CMYK images).
    mean:               receives the average value of the selection.
    standard_deviation: receives the standard deviation of the selection.
    exception:          receives any errors or warnings.

  Returns MagickTrue on success, MagickFalse if statistics could not be
  gathered or no channel was selected.
*/
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      /* accumulate per-channel variance (E[x^2]-E[x]^2); sqrt at the end */
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].variance-
        channel_statistics[RedChannel].mean*
        channel_statistics[RedChannel].mean;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].variance-
        channel_statistics[GreenChannel].mean*
        channel_statistics[GreenChannel].mean;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].variance-
        channel_statistics[BlueChannel].mean*
        channel_statistics[BlueChannel].mean;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[OpacityChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].variance-
        channel_statistics[OpacityChannel].mean*
        channel_statistics[OpacityChannel].mean;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].variance-
        channel_statistics[BlackChannel].mean*
        channel_statistics[BlackChannel].mean;
      channels++;
    }
  if (channels == 0)
    {
      /*
        No channel was selected (e.g. OpacityChannel on a matte-less image
        or IndexChannel on non-CMYK): avoid division by zero producing NaN
        results; report well-defined values and failure instead.
      */
      *mean=0.0;
      *standard_deviation=0.0;
      channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
        channel_statistics);
      return(MagickFalse);
    }
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].standard_deviation=
    sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l K u r t o s i s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
%  image channels.
%
%  The format of the GetImageChannelKurtosis method is:
%
%      MagickBooleanType GetImageChannelKurtosis(const Image *image,
%        const ChannelType channel,double *kurtosis,double *skewness,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o kurtosis: the kurtosis of the channel.
%
%    o skewness: the skewness of the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetImageKurtosis() returns kurtosis/skewness over all channels;
  convenience wrapper around GetImageChannelKurtosis() with
  CompositeChannels.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception);
  return(status);
}

/*
  GetImageChannelKurtosis() accumulates raw moments (sum, sum of squares,
  cubes and fourth powers) over all selected channel samples, then derives
  excess kurtosis and skewness.  Returns MagickFalse when pixels cannot be
  read; the output values stay 0.0 if the standard deviation is zero.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* `area` counts samples, one per selected channel per pixel */
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          mean+=GetPixelOpacity(p);
          sum_squares+=(double) GetPixelOpacity(p)*GetPixelOpacity(p);
          sum_cubes+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p);
          sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p)*GetPixelOpacity(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          mean+=GetPixelIndex(indexes+x);
          sum_squares+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_cubes+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_fourth_power+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      /* excess kurtosis: E[(x-mu)^4]/sigma^4 - 3, expanded in raw moments */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      /* skewness: E[(x-mu)^3]/sigma^3, expanded in raw moments */
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l R a n g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelRange() returns the range of one or more image channels.
%
%  The format of the GetImageChannelRange method is:
%
%      MagickBooleanType GetImageChannelRange(const Image *image,
%        const ChannelType channel,double *minima,double *maxima,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o minima: the minimum value in the channel.
%
%    o maxima: the maximum value in the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetImageRange() returns the range over all channels; convenience wrapper
  around GetImageChannelRange() with CompositeChannels.
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception));
}

/*
  GetImageChannelRange() scans every pixel and records the minimum and
  maximum value over the selected channels.  `minima`/`maxima` start at
  +/-MagickHuge sentinels and therefore remain at those values for an
  empty selection.  Returns MagickFalse if any row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  MagickPixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *maxima=(-MagickHuge);
  *minima=MagickHuge;
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if (pixel.red < *minima)
            *minima=(double) pixel.red;
          if (pixel.red > *maxima)
            *maxima=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (pixel.green < *minima)
            *minima=(double) pixel.green;
          if (pixel.green > *maxima)
            *maxima=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (pixel.blue < *minima)
            *minima=(double) pixel.blue;
          if (pixel.blue > *maxima)
            *maxima=(double) pixel.blue;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          if (pixel.opacity < *minima)
            *minima=(double) pixel.opacity;
          if (pixel.opacity > *maxima)
            *maxima=(double) pixel.opacity;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) GetPixelIndex(indexes+x) < *minima)
            *minima=(double) GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) > *maxima)
            *maxima=(double) GetPixelIndex(indexes+x);
        }
      p++;
    }
  }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l S t a t i s t i c s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelStatistics() returns statistics for each channel in the
%  image.  The statistics include the channel depth, its minima, maxima, mean,
%  standard deviation, kurtosis and skewness.  You can access the red channel
%  mean, for example, like this:
%
%      channel_statistics=GetImageChannelStatistics(image,exception);
%      red_mean=channel_statistics[RedChannel].mean;
%
%  Use MagickRelinquishMemory() to free the statistics buffer.
%
%  The format of the GetImageChannelStatistics method is:
%
%      ChannelStatistics *GetImageChannelStatistics(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetImageChannelStatistics() computes per-channel statistics (depth,
  minima, maxima, raw moment sums, mean, variance, standard deviation,
  kurtosis, skewness) for every channel plus a CompositeChannels summary.
  Returns a heap-allocated array indexed by channel (free it with
  RelinquishMagickMemory); returns NULL on allocation failure or if pixels
  could not be read.
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  double
    area;

  MagickStatusType
    status;

  QuantumAny
    range;

  register ssize_t
    i;

  size_t
    channels,
    depth,
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=CompositeChannels+1UL;
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
    sizeof(*channel_statistics));
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(channel_statistics);
  (void) ResetMagickMemory(channel_statistics,0,length*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickHuge);
    channel_statistics[i].minima=MagickHuge;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    /*
      Note: x (and p) are only advanced at the bottom of the loop body; the
      depth probes below `continue` WITHOUT advancing so the same pixel is
      re-tested against the increased candidate depth.
    */
    for (x=0; x < (ssize_t) image->columns; )
    {
      if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          /* does the value survive a round-trip at the candidate depth? */
          depth=channel_statistics[RedChannel].depth;
          range=GetQuantumRange(depth);
          status=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
            GetPixelRed(p),range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[RedChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[GreenChannel].depth;
          range=GetQuantumRange(depth);
          status=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
            GetPixelGreen(p),range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[GreenChannel].depth++;
              continue;
            }
        }
      if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
        {
          depth=channel_statistics[BlueChannel].depth;
          range=GetQuantumRange(depth);
          status=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
            GetPixelBlue(p),range),range) ? MagickTrue : MagickFalse;
          if (status != MagickFalse)
            {
              channel_statistics[BlueChannel].depth++;
              continue;
            }
        }
      if (image->matte != MagickFalse)
        {
          if (channel_statistics[OpacityChannel].depth !=
              MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[OpacityChannel].depth;
              range=GetQuantumRange(depth);
              status=GetPixelOpacity(p) != ScaleAnyToQuantum(ScaleQuantumToAny(
                GetPixelOpacity(p),range),range) ? MagickTrue : MagickFalse;
              if (status != MagickFalse)
                {
                  channel_statistics[OpacityChannel].depth++;
                  continue;
                }
            }
        }
      if (image->colorspace == CMYKColorspace)
        {
          if (channel_statistics[BlackChannel].depth !=
              MAGICKCORE_QUANTUM_DEPTH)
            {
              depth=channel_statistics[BlackChannel].depth;
              range=GetQuantumRange(depth);
              status=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(
                ScaleQuantumToAny(GetPixelIndex(indexes+x),range),range) ?
                MagickTrue : MagickFalse;
              if (status != MagickFalse)
                {
                  channel_statistics[BlackChannel].depth++;
                  continue;
                }
            }
        }
      /*
        Accumulate extrema and raw moment sums for each channel.
      */
      if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
        channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
      if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
        channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
      channel_statistics[RedChannel].sum+=GetPixelRed(p);
      channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
        GetPixelRed(p);
      channel_statistics[RedChannel].sum_cubed+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      channel_statistics[RedChannel].sum_fourth_power+=(double)
        GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
      if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
        channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
      if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
        channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
      channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
        GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
        GetPixelGreen(p)*GetPixelGreen(p);
      channel_statistics[GreenChannel].sum_fourth_power+=(double)
        GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
      if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
        channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
      if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
        channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
      channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
        GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
        GetPixelBlue(p)*GetPixelBlue(p);
      channel_statistics[BlueChannel].sum_fourth_power+=(double)
        GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
      if (image->matte != MagickFalse)
        {
          if ((double) GetPixelOpacity(p) <
              channel_statistics[OpacityChannel].minima)
            channel_statistics[OpacityChannel].minima=(double)
              GetPixelOpacity(p);
          if ((double) GetPixelOpacity(p) >
              channel_statistics[OpacityChannel].maxima)
            channel_statistics[OpacityChannel].maxima=(double)
              GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum_squared+=(double)
            GetPixelOpacity(p)*GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum_cubed+=(double)
            GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p);
          channel_statistics[OpacityChannel].sum_fourth_power+=(double)
            GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p);
        }
      if (image->colorspace == CMYKColorspace)
        {
          if ((double) GetPixelIndex(indexes+x) <
              channel_statistics[BlackChannel].minima)
            channel_statistics[BlackChannel].minima=(double)
              GetPixelIndex(indexes+x);
          if ((double) GetPixelIndex(indexes+x) >
              channel_statistics[BlackChannel].maxima)
            channel_statistics[BlackChannel].maxima=(double)
              GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_squared+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_cubed+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          channel_statistics[BlackChannel].sum_fourth_power+=(double)
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
        }
      x++;
      p++;
    }
  }
  /*
    Normalize sums to per-pixel averages; derive mean/variance/std-dev.
  */
  area=(double) image->columns*image->rows;
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[i].sum/=area;
    channel_statistics[i].sum_squared/=area;
    channel_statistics[i].sum_cubed/=area;
    channel_statistics[i].sum_fourth_power/=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    channel_statistics[i].standard_deviation=sqrt(
      channel_statistics[i].variance-(channel_statistics[i].mean*
      channel_statistics[i].mean));
  }
  /*
    Aggregate the per-channel results into the CompositeChannels slot.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
      channel_statistics[CompositeChannels].depth,(double)
      channel_statistics[i].depth);
    channel_statistics[CompositeChannels].minima=MagickMin(
      channel_statistics[CompositeChannels].minima,
      channel_statistics[i].minima);
    channel_statistics[CompositeChannels].maxima=EvaluateMax(
      channel_statistics[CompositeChannels].maxima,
      channel_statistics[i].maxima);
    channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
    channel_statistics[CompositeChannels].sum_squared+=
      channel_statistics[i].sum_squared;
    channel_statistics[CompositeChannels].sum_cubed+=
      channel_statistics[i].sum_cubed;
    channel_statistics[CompositeChannels].sum_fourth_power+=
      channel_statistics[i].sum_fourth_power;
    channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
    channel_statistics[CompositeChannels].variance+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
    channel_statistics[CompositeChannels].standard_deviation+=
      channel_statistics[i].variance-channel_statistics[i].mean*
      channel_statistics[i].mean;
  }
  /* `channels` counts the channels the image actually carries */
  channels=3;
  if (image->matte != MagickFalse)
    channels++;
  if (image->colorspace == CMYKColorspace)
    channels++;
  channel_statistics[CompositeChannels].sum/=channels;
  channel_statistics[CompositeChannels].sum_squared/=channels;
  channel_statistics[CompositeChannels].sum_cubed/=channels;
  channel_statistics[CompositeChannels].sum_fourth_power/=channels;
  channel_statistics[CompositeChannels].mean/=channels;
  channel_statistics[CompositeChannels].variance/=channels;
  channel_statistics[CompositeChannels].standard_deviation=
    sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
  channel_statistics[CompositeChannels].kurtosis/=channels;
  channel_statistics[CompositeChannels].skewness/=channels;
  /*
    Derive skewness and excess kurtosis from the raw moments.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
  {
    if (channel_statistics[i].standard_deviation == 0.0)
      continue;
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-
      3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+
      2.0*channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-
      4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+
      6.0*channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation*
      channel_statistics[i].standard_deviation)-3.0;
  }
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l y n o m i a l I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolynomialImage() returns a new image where each pixel is the sum of the
%  pixels in the image sequence after applying its corresponding terms
%  (coefficient and degree pairs).
%
%  The format of the PolynomialImage method is:
%
%      Image *PolynomialImage(const Image *images,const size_t number_terms,
%        const double *terms,ExceptionInfo *exception)
%      Image *PolynomialImageChannel(const Image *images,
%        const ChannelType channel,const size_t number_terms,
%        const double *terms,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o channel: the channel.
%
%    o number_terms: the number of terms in the list.  The actual list length
%      is 2 x number_terms + 1 (the constant).
%
%    o terms: the list of polynomial coefficients and degree pairs and a
%      constant.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Convenience wrapper: evaluates the polynomial over all default channels.
  Returns a newly allocated image (caller owns it) or NULL on failure.
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
  Image
    *polynomial_image;

  polynomial_image=PolynomialImageChannel(images,DefaultChannels,number_terms,
    terms,exception);
  return(polynomial_image);
}

/*
  Build a new image where each pixel is sum_i coefficient[i] *
  (normalized source pixel)^degree[i] across the image sequence; terms[] is
  consumed as (coefficient, degree) pairs, one pair per image.

  NOTE(review): the channel parameter is accepted but never read in this
  body -- all channels (red/green/blue/opacity, and index for CMYK) are
  always processed; confirm whether per-channel masking was intended.
*/
MagickExport Image *PolynomialImageChannel(const Image *images,
  const ChannelType channel,const size_t number_terms,const double *terms,
  ExceptionInfo *exception)
{
#define PolynomialImageTag  "Polynomial/Image"

  CacheView
    *polynomial_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **restrict polynomial_pixels,
    zero;

  size_t
    number_images;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The result image is a clone of the first image in the sequence; it must
    be DirectClass so pixels can be written directly.
  */
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /*
    One per-thread row accumulator buffer (MagickPixelPacket holds
    floating-point channel sums, so intermediate values may exceed the
    quantum range before the final clamp).
  */
  polynomial_pixels=AcquirePixelThreadSet(images,number_images);
  if (polynomial_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(images,&zero);
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict polynomial_indexes;

    register MagickPixelPacket
      *polynomial_pixel;

    register PixelPacket
      *restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
    polynomial_pixel=polynomial_pixels[id];
    /* Reset this thread's row accumulator before summing the sequence. */
    for (x=0; x < (ssize_t) image->columns; x++)
      polynomial_pixel[x]=zero;
    next=images;
    for (i=0; i < (ssize_t) number_images; i++)
    {
      register const IndexPacket
        *indexes;

      register const PixelPacket
        *p;

      /* Only the first number_terms images have a (coefficient,degree)
         pair; extra images in the sequence are ignored. */
      if (i >= (ssize_t) number_terms)
        break;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          coefficient,
          degree;

        /* terms[] is laid out as pairs: terms[2i]=coefficient,
           terms[2i+1]=degree. */
        coefficient=terms[i << 1];
        degree=terms[(i << 1)+1];
        polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
        polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
          degree);
        polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,degree);
        /* Opacity is inverted to alpha before the power is applied. */
        polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
          (QuantumRange-p->opacity),degree);
        if (image->colorspace == CMYKColorspace)
          polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
            degree);
        p++;
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /* Scale the accumulated sums back to quantum range and clamp. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
      SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
      SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
      if (image->matte == MagickFalse)
        SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      else
        SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
          polynomial_pixel[x].index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PolynomialImages)
#endif
        proceed=SetImageProgress(images,PolynomialImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t a t i s t i c I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StatisticImage() makes each pixel the min / max / median / mode / etc. of
%  the neighborhood of the specified width and height.
% % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % Image *StatisticImageChannel(const Image *image, % const ChannelType channel,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the image channel. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ #define ListChannels 5 typedef struct _ListNode { size_t next[9], count, signature; } ListNode; typedef struct _SkipList { ssize_t level; ListNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed, signature; SkipList lists[ListChannels]; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { register ssize_t i; if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); for (i=0; i < ListChannels; i++) if (pixel_list->lists[i].nodes != (ListNode *) NULL) pixel_list->lists[i].nodes=(ListNode *) RelinquishMagickMemory( pixel_list->lists[i].nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; register ssize_t i; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) 
ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; for (i=0; i < ListChannels; i++) { pixel_list->lists[i].nodes=(ListNode *) AcquireQuantumMemory(65537UL, sizeof(*pixel_list->lists[i].nodes)); if (pixel_list->lists[i].nodes == (ListNode *) NULL) return(DestroyPixelList(pixel_list)); (void) ResetMagickMemory(pixel_list->lists[i].nodes,0,65537UL* sizeof(*pixel_list->lists[i].nodes)); } pixel_list->signature=MagickSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_list[i]=AcquirePixelList(width,height); if (pixel_list[i] == (PixelList *) NULL) return(DestroyPixelListThreadSet(pixel_list)); } return(pixel_list); } static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel, const size_t color) { register SkipList *list; register ssize_t level; size_t search, update[9]; /* Initialize the node. */ list=pixel_list->lists+channel; list->nodes[color].signature=pixel_list->signature; list->nodes[color].count=1; /* Determine where it belongs in the list. */ search=65536UL; for (level=list->level; level >= 0; level--) { while (list->nodes[search].next[level] < color) search=list->nodes[search].next[level]; update[level]=search; } /* Generate a pseudo-random level for this node. */ for (level=0; ; level++) { pixel_list->seed=(pixel_list->seed*42893621L)+1L; if ((pixel_list->seed & 0x300) != 0x300) break; } if (level > 8) level=8; if (level > (list->level+2)) level=list->level+2; /* If we're raising the list's level, link back to the root node. 
*/ while (level > list->level) { list->level++; update[list->level]=65536UL; } /* Link the node into the skip-list. */ do { list->nodes[color].next[level]=list->nodes[update[level]].next[level]; list->nodes[update[level]].next[level]=color; } while (level-- > 0); } static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, maximum; ssize_t count; unsigned short channels[ListChannels]; /* Find the maximum value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; maximum=list->nodes[color].next[0]; do { color=list->nodes[color].next[0]; if (color > maximum) maximum=color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) maximum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { MagickRealType sum; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the mean value for each of the color. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; do { color=list->nodes[color].next[0]; sum+=(MagickRealType) list->nodes[color].count*color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; channels[channel]=(unsigned short) sum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the median value for each of the color. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; do { color=list->nodes[color].next[0]; count+=list->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); channels[channel]=(unsigned short) color; } GetMagickPixelPacket((const Image *) NULL,pixel); pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, minimum; ssize_t count; unsigned short channels[ListChannels]; /* Find the minimum value for each of the color. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; count=0; color=65536UL; minimum=list->nodes[color].next[0]; do { color=list->nodes[color].next[0]; if (color < minimum) minimum=color; count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) minimum; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, max_count, mode; ssize_t count; unsigned short channels[5]; /* Make each pixel the 'predominant color' of the specified neighborhood. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; mode=color; max_count=list->nodes[mode].count; count=0; do { color=list->nodes[color].next[0]; if (list->nodes[color].count > max_count) { mode=color; max_count=list->nodes[mode].count; } count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); channels[channel]=(unsigned short) mode; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel) { register SkipList *list; register ssize_t channel; size_t color, next, previous; ssize_t count; unsigned short channels[5]; /* Finds the non peak value for each of the colors. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; next=list->nodes[color].next[0]; count=0; do { previous=color; color=next; next=list->nodes[color].next[0]; count+=list->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); if ((previous == 65536UL) && (next != 65536UL)) color=next; else if ((previous != 65536UL) && (next == 65536UL)) color=previous; channels[channel]=(unsigned short) color; } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static void GetStandardDeviationPixelList(PixelList *pixel_list, MagickPixelPacket *pixel) { MagickRealType sum, sum_squared; register SkipList *list; register ssize_t channel; size_t color; ssize_t count; unsigned short channels[ListChannels]; /* Find the standard-deviation value for each of the color. 
*/ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; color=65536L; count=0; sum=0.0; sum_squared=0.0; do { register ssize_t i; color=list->nodes[color].next[0]; sum+=(MagickRealType) list->nodes[color].count*color; for (i=0; i < (ssize_t) list->nodes[color].count; i++) sum_squared+=((MagickRealType) color)*((MagickRealType) color); count+=list->nodes[color].count; } while (count < (ssize_t) pixel_list->length); sum/=pixel_list->length; sum_squared/=pixel_list->length; channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum)); } pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]); pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]); pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]); pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]); pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]); } static inline void InsertPixelList(const Image *image,const PixelPacket *pixel, const IndexPacket *indexes,PixelList *pixel_list) { size_t signature; unsigned short index; index=ScaleQuantumToShort(GetPixelRed(pixel)); signature=pixel_list->lists[0].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[0].nodes[index].count++; else AddNodePixelList(pixel_list,0,index); index=ScaleQuantumToShort(GetPixelGreen(pixel)); signature=pixel_list->lists[1].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[1].nodes[index].count++; else AddNodePixelList(pixel_list,1,index); index=ScaleQuantumToShort(GetPixelBlue(pixel)); signature=pixel_list->lists[2].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[2].nodes[index].count++; else AddNodePixelList(pixel_list,2,index); index=ScaleQuantumToShort(GetPixelOpacity(pixel)); signature=pixel_list->lists[3].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[3].nodes[index].count++; else AddNodePixelList(pixel_list,3,index); if (image->colorspace == 
CMYKColorspace) index=ScaleQuantumToShort(GetPixelIndex(indexes)); signature=pixel_list->lists[4].nodes[index].signature; if (signature == pixel_list->signature) pixel_list->lists[4].nodes[index].count++; else AddNodePixelList(pixel_list,4,index); } static inline MagickRealType MagickAbsoluteValue(const MagickRealType x) { if (x < 0) return(-x); return(x); } static void ResetPixelList(PixelList *pixel_list) { int level; register ListNode *root; register SkipList *list; register ssize_t channel; /* Reset the skip-list. */ for (channel=0; channel < 5; channel++) { list=pixel_list->lists+channel; root=list->nodes+65536UL; list->level=0; for (level=0; level < 9; level++) root->next[level]=65536UL; } pixel_list->seed=pixel_list->signature++; } MagickExport Image *StatisticImage(const Image *image,const StatisticType type, const size_t width,const size_t height,ExceptionInfo *exception) { Image *statistic_image; statistic_image=StatisticImageChannel(image,DefaultChannels,type,width, height,exception); return(statistic_image); } MagickExport Image *StatisticImageChannel(const Image *image, const ChannelType channel,const StatisticType type,const size_t width, const size_t height,ExceptionInfo *exception) { #define StatisticImageTag "Statistic/Image" CacheView *image_view, *statistic_view; Image *statistic_image; MagickBooleanType status; MagickOffsetType progress; PixelList **restrict pixel_list; size_t neighbor_height, neighbor_width; ssize_t y; /* Initialize statistics image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse) { InheritException(exception,&statistic_image->exception); statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) : width; neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) : height; pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict statistic_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y- (ssize_t) (neighbor_height/2L),image->columns+neighbor_width, neighbor_height,exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view); for (x=0; x < (ssize_t) statistic_image->columns; x++) { MagickPixelPacket pixel; register const IndexPacket *restrict s; register const PixelPacket *restrict r; register ssize_t u, v; r=p; s=indexes+x; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) neighbor_height; v++) { for (u=0; u < (ssize_t) neighbor_width; u++) InsertPixelList(image,r+u,s+u,pixel_list[id]); r+=image->columns+neighbor_width; s+=image->columns+neighbor_width; } GetMagickPixelPacket(image,&pixel); SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+ neighbor_width*neighbor_height/2,&pixel); switch (type) { case GradientStatistic: { MagickPixelPacket maximum, minimum; GetMinimumPixelList(pixel_list[id],&pixel); minimum=pixel; GetMaximumPixelList(pixel_list[id],&pixel); maximum=pixel; pixel.red=MagickAbsoluteValue(maximum.red-minimum.red); 
pixel.green=MagickAbsoluteValue(maximum.green-minimum.green); pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue); pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity); if (image->colorspace == CMYKColorspace) pixel.index=MagickAbsoluteValue(maximum.index-minimum.index); break; } case MaximumStatistic: { GetMaximumPixelList(pixel_list[id],&pixel); break; } case MeanStatistic: { GetMeanPixelList(pixel_list[id],&pixel); break; } case MedianStatistic: default: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { GetMinimumPixelList(pixel_list[id],&pixel); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case StandardDeviationStatistic: { GetStandardDeviationPixelList(pixel_list[id],&pixel); break; } } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index)); p++; q++; } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_StatisticImage) #endif proceed=SetImageProgress(image,StatisticImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
/* ===================================== main.c ===================================== */
/* Entry point for Matilda -- parses the program flags and starts the program in either GTP or text mode. Also deals with updating some internal parameters at startup time. */ #include "config.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include "alloc.h" #include "board.h" #include "cfg_board.h" #include "constants.h" #include "engine.h" #include "flog.h" #include "game_record.h" #include "mcts.h" #include "opening_book.h" #include "pts_file.h" #include "randg.h" #include "stringm.h" #include "time_ctrl.h" #include "timem.h" #include "transpositions.h" #include "version.h" #include "zobrist.h" game_record current_game; time_system current_clock_black; time_system current_clock_white; bool time_system_overriden = false; /* ignore attempts to change time system */ bool save_all_games_to_file = false; /* save all games as SGF on gameover */ bool resign_on_timeout = false; /* resign instead of passing if timed out */ bool pass_when_losing; /* whether we pass instead of resigning */ u32 limit_by_playouts = 0; /* limit MCTS by playouts instead of time */ char * sentinel_file = NULL; clock_t start_cpu_time; extern u64 max_size_in_mbs; /* For tuning */ extern double prior_stone_scale_factor; extern u16 prior_even; extern u16 prior_nakade; extern u16 prior_self_atari; extern u16 prior_attack; extern u16 prior_defend; extern u16 prior_pat3; extern u16 prior_near_last; extern u16 prior_line2; extern u16 prior_line3; extern u16 prior_empty; extern u16 prior_corner; extern u16 prior_bad_play; extern u16 prior_pass; extern u16 prior_starting_point; extern double rave_equiv; extern u16 pl_skip_saving; extern u16 pl_skip_nakade; extern u16 pl_skip_pattern; extern u16 pl_skip_capture; extern u16 pl_ban_self_atari; extern u16 expansion_delay; static u16 _dummy; /* used for testing CLOP */ const void * tunable[] = { "f", "prior_stone_scale_factor", &prior_stone_scale_factor, "i", "prior_even", &prior_even, "i", "prior_nakade", 
&prior_nakade, "i", "prior_self_atari", &prior_self_atari, "i", "prior_attack", &prior_attack, "i", "prior_defend", &prior_defend, "i", "prior_pat3", &prior_pat3, "i", "prior_near_last", &prior_near_last, "i", "prior_line2", &prior_line2, "i", "prior_line3", &prior_line3, "i", "prior_empty", &prior_empty, "i", "prior_corner", &prior_corner, "i", "prior_bad_play", &prior_bad_play, "i", "prior_pass", &prior_pass, "i", "prior_starting_point", &prior_starting_point, "f", "rave_equiv", &rave_equiv, "i", "pl_skip_saving", &pl_skip_saving, "i", "pl_skip_nakade", &pl_skip_nakade, "i", "pl_skip_pattern", &pl_skip_pattern, "i", "pl_skip_capture", &pl_skip_capture, "i", "pl_ban_self_atari", &pl_ban_self_atari, "i", "expansion_delay", &expansion_delay, "i", "dummy", &_dummy, NULL }; static void set_parameter( const char * restrict name, const char * restrict value ) { for (u16 i = 0; tunable[i] != NULL; i += 3) { char * sname = ((char *)tunable[i + 1]); if (strcmp(sname, name) != 0) { continue; } char * type = ((char *)tunable[i]); if (type[0] == 'i') { u32 val; if (!parse_uint(&val, value) || val > UINT16_MAX) { char * buf = alloc(); snprintf(buf, MAX_PAGE_SIZ, "unsigned integer format error: %s", value); flog_crit("init", buf); release(buf); } u16 * svar = ((u16 *)tunable[i + 2]); *svar = val; return; } if (type[0] == 'f') { double val; if (!parse_float(&val, value)) { char * buf = alloc(); snprintf(buf, MAX_PAGE_SIZ, "float format error: %s", value); flog_crit("init", buf); release(buf); } double * svar = ((double *)tunable[i + 2]); *svar = val; return; } char * buf = alloc(); snprintf(buf, MAX_PAGE_SIZ, "illegal internal parameter codification: %s", type); flog_crit("init", buf); release(buf); } char * buf = alloc(); snprintf(buf, MAX_PAGE_SIZ, "illegal parameter name: %s", name); flog_crit("init", buf); release(buf); } void main_gtp( bool think_in_opt_turn ); void main_text( bool is_black ); static void startup( bool opening_books_enabled, d16 desired_num_threads ) { 
assert_data_folder_exists(); if (opening_books_enabled) { opening_book_init(); } mcts_init(); load_handicap_points(); load_hoshi_points(); load_starting_points(); u32 automatic_num_threads; #pragma omp parallel #pragma omp master { automatic_num_threads = omp_get_num_threads(); } if (automatic_num_threads > MAXIMUM_NUM_THREADS) { omp_set_num_threads(MAXIMUM_NUM_THREADS); } if (desired_num_threads > 0) { omp_set_num_threads(MIN(desired_num_threads, MAXIMUM_NUM_THREADS)); } omp_set_dynamic(0); } static void usage() { fprintf(stderr, "\033[1mUSAGE\033[0m\n"); fprintf(stderr, " matilda [options]\n\n"); fprintf(stderr, "\033[1mDESCRIPTION\033[0m\n"); fprintf(stderr, " Matilda is a computer program that plays the game of Go. It uses\n Chinese rules without life in seki.\n Two interface modes are available: a simple text interface, and the Go\n Text Protocol through the standard input and output file descriptors.\n Most more advanced features, like file manipulation and game analysis,\n are only available through GTP commands. To learn more about them\n consult the file GTP_README.\n All files read and written, including SGF, reside in the data folder.\n\n"); fprintf(stderr, "\033[1mOPTIONS\033[0m\n"); fprintf(stderr, " \033[1m-m, --mode <gtp or text>\033[0m\n\n"); fprintf(stderr, " Matilda attempts to detect if its input file descriptor is a terminal\n and if it is it uses the text mode interface. Otherwise it uses the GTP\n interface. This command overrides this with the specific mode you want\n to be used.\n\n"); fprintf(stderr, " \033[1m-c, --color <black or white>\033[0m\n\n"); fprintf(stderr, " Select human player color (text mode only).\n\n"); fprintf(stderr, " \033[1m--losing <keyword>\033[0m\n\n"); fprintf(stderr, " If playing with even komi Matilda may confuse drawn positions with\n lost positions, and resign when actually winning.\n"); fprintf(stderr, " Choose whether to resign or pass when losing the match. The keyword\n argument can be resign or pass. 
By default Matilda resigns on text mode\n and passes on GTP mode.\n\n"); fprintf(stderr, " \033[1m--resign_on_timeout\033[0m\n\n"); fprintf(stderr, " Resign if the program believes to have lost on time. Default: false.\n\n"); fprintf(stderr, " \033[1m-t, --time <value>\033[0m\n\n"); fprintf(stderr, " Override the time system in use. A composite overtime format is used\n with four components: main time, number of periods, time per period and\n number of stones per period. Examples: 90m (suddent death), 10m+3x10s\n (Canadian overtime), 1h+30s/5 (Japanese byo-yomi), 15m+3x30s/10\n (mixed).\n\n For no time limits use 0 main time and 0 period stones, or the keyword\n infinite. Examples: 0+1m/0, infinite.\n\n Time units available: ms (milliseconds), s (seconds), m (minutes), h\n (hours). Main time value 0 does not accept a unit.\n\n"); fprintf(stderr, " \033[1m--think_in_opt_time\033[0m\n\n"); fprintf(stderr, " Continue thinking in the background while in the opponents turn.\n\n"); fprintf(stderr, " \033[1m--disable_gtp_time_control\033[0m\n\n"); fprintf(stderr, " Disable time control GTP commands.\n\n"); fprintf(stderr, " \033[1m-d, --data <path>\033[0m\n\n"); fprintf(stderr, " Override the data folder path. The folder must exist.\n\n"); fprintf(stderr, " \033[1m--disable_opening_books\033[0m\n\n"); fprintf(stderr, " Disable the use of opening books.\n\n"); fprintf(stderr, " \033[1m-l, --log <mask>\033[0m\n\n"); fprintf(stderr, " Set the message types to log to file and print to the standard error\n file descriptor. The available modes are:\n\n e - Error messages\n w - Warning messages\n p - Protocol messages\n i - Informational messages\n d - Debugging messages\n\n Default setting: --log ew\n Leave empty for no logging. Notice log printing to the standard error\n file descriptor may be muted in text mode.\n\n"); fprintf(stderr, " \033[1m--log_dest <mask>\033[0m\n\n"); fprintf(stderr, " Set the log destination. 
The available destinations are:\n\n o - Standard error file descriptor\n f - File (matilda_date.log)\n\n Default setting: --log_dest of\n\n"); fprintf(stderr, " \033[1m--memory <number>\033[0m\n\n"); fprintf(stderr, " Override the available memory for the MCTS transpositions table, in\n MiB. The default is %u MiB.\n\n", DEFAULT_UCT_MEMORY); fprintf(stderr, " \033[1m--save_all\033[0m\n\n"); fprintf(stderr, " Save all finished games to the data folder as SGF.\n\n"); fprintf(stderr, " \033[1m--playouts <number>\033[0m\n\n"); fprintf(stderr, " Play with a fixed number of simulations per turn instead of limited by\n time. Cannot be used with time related flags.\n\n"); fprintf(stderr, " \033[1m--threads <number>\033[0m\n\n"); fprintf(stderr, " Override the number of OpenMP threads to use. The default is the total\n number of normal plus hyperthreaded CPU cores.\n\n"); fprintf(stderr, " \033[1m--benchmark\033[0m\n\n"); fprintf(stderr, " Run a two minute benchmark of the system, returning a linear measure of\n MCTS performance (number of simulations per second.\n\n"); fprintf(stderr, " \033[1m--sentinel <filename>\033[0m\n\n"); fprintf(stderr, " Close the program after a game if the file is found, deleting the file.\n Use to interrupt online play without annoying human players. Is\n executed after commands kgs-game_over and final_score, and after a\n genmove resignation.\n\n"); fprintf(stderr, " \033[1m--set <name> <value>\033[0m\n\n"); fprintf(stderr, " For optimization. 
Set the value of an internal parameter.\n\n"); fprintf(stderr, " \033[1m-h, --help\033[0m\n\n"); fprintf(stderr, " Print usage information at startup and exit.\n\n"); fprintf(stderr, " \033[1m-i, --info\033[0m\n\n"); fprintf(stderr, " Print runtime information at startup and exit.\n\n"); fprintf(stderr, " \033[1m-v, --version\033[0m\n\n"); fprintf(stderr, " Print version information and exit.\n\n"); fprintf(stderr, "\033[1mBUGS\033[0m\n"); fprintf(stderr, " Please visit https://github.com/gonmf/matilda for giving feedback.\n\n"); } int main( int argc, char * argv[] ) { start_cpu_time = clock(); alloc_init(); flog_config_modes(DEFAULT_LOG_MODES); flog_config_destinations(LOG_DEST_FILE); /* default for text mode */ bool flog_dest_set = false; bool use_gtp = pass_when_losing = (isatty(STDIN_FILENO) == 0); bool color_set = false; bool time_related_set = false; bool human_player_color = true; bool think_in_opt_turn = false; bool opening_books_enabled = true; set_time_per_turn(&current_clock_black, DEFAULT_TIME_PER_TURN); set_time_per_turn(&current_clock_white, DEFAULT_TIME_PER_TURN); d16 desired_num_threads = DEFAULT_NUM_THREADS; for (int i = 1; i < argc; ++i) { if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) { fprintf(stderr, "matilda - Go/Igo/Weiqi/Baduk computer player\n\n"); usage(); return EXIT_SUCCESS; } if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--version") == 0) { char * s = alloc(); version_string(s); fprintf(stderr, "matilda %s\n", s); release(s); return EXIT_SUCCESS; } if (strcmp(argv[i], "-i") == 0 || strcmp(argv[i], "--info") == 0) { fprintf(stderr, "matilda - Go/Igo/Weiqi/Baduk computer player\n\n"); char * s = alloc(); build_info(s); fprintf(stderr, "\n%s\n", s); release(s); return EXIT_SUCCESS; } } fprintf(stderr, "matilda - Go/Igo/Weiqi/Baduk computer player\n\n"); u16 args_understood = 0; for (int i = 1; i < argc; ++i) { if ((strcmp(argv[i], "-l") == 0 || strcmp(argv[i], "--log") == 0)) { args_understood += 1; if (i == argc - 
1) { flog_config_modes(0); continue; } if (argv[i + 1][0] == '-') { flog_config_modes(0); continue; } args_understood += 1; u16 mode = 0; for (u16 j = 0; argv[i + 1][j]; ++j) { if (argv[i + 1][j] == 'e') { mode |= LOG_MODE_ERROR; continue; } if (argv[i + 1][j] == 'w') { mode |= LOG_MODE_WARN; continue; } if (argv[i + 1][j] == 'p') { mode |= LOG_MODE_PROT; continue; } if (argv[i + 1][j] == 'i') { mode |= LOG_MODE_INFO; continue; } if (argv[i + 1][j] == 'd') { mode |= LOG_MODE_DEBUG; continue; } fprintf(stderr, "illegal logging mode: %c\n", argv[i + 1][j]); exit(EXIT_FAILURE); } flog_config_modes(mode); ++i; continue; } if (strcmp(argv[i], "--log_dest") == 0 && i < argc - 1) { args_understood += 2; u16 dest = 0; for (u16 j = 0; argv[i + 1][j]; ++j) { if (argv[i + 1][j] == 'o') { dest |= LOG_DEST_STDF; continue; } if (argv[i + 1][j] == 'f') { dest |= LOG_DEST_FILE; continue; } fprintf(stderr, "illegal logging destination: %c\n", argv[i + 1][j]); exit(EXIT_FAILURE); } flog_config_destinations(dest); flog_dest_set = true; ++i; continue; } if (strcmp(argv[i], "--memory") == 0 && i < argc - 1) { args_understood += 2; d32 v; if (!parse_int(&v, argv[i + 1])) { fprintf(stderr, "format error in size of transpositions table\n"); exit(EXIT_FAILURE); } if (v < 2) { fprintf(stderr, "invalid size for transpositions table\n"); exit(EXIT_FAILURE); } max_size_in_mbs = v; ++i; continue; } if (strcmp(argv[i], "--set") == 0 && i < argc - 2) { args_understood += 3; set_parameter(argv[i + 1], argv[i + 2]); i += 2; continue; } if ((strcmp(argv[i], "-d") == 0 || strcmp(argv[i], "--data") == 0) && i < argc - 1) { args_understood += 2; if (!set_data_folder(argv[i + 1])) { fprintf(stderr, "data directory path %s is not valid\n", argv[i + 1]); exit(EXIT_FAILURE); } ++i; continue; } if (strcmp(argv[i], "--threads") == 0 && i < argc - 1) { args_understood += 2; d32 v; if (!parse_int(&v, argv[i + 1])) { fprintf(stderr, "format error for thread number\n"); exit(EXIT_FAILURE); } if (v < 1 || v > 
MAXIMUM_NUM_THREADS) { fprintf(stderr, "invalid number of threads requested\n"); exit(EXIT_FAILURE); } desired_num_threads = v; ++i; continue; } } for (int i = 1; i < argc; ++i) { if (strcmp(argv[i], "--benchmark") == 0) { /* Perform a 2-minute benchmark made of 12 1-minute MCTS, printing the number of simulations per second. */ args_understood += 1; startup(false, desired_num_threads); /* Perform a larger initial MCTS just to allocate memory so all next runs are made in more similar pre-allocated memory conditions. */ mcts_benchmark(14 * 1000); u32 sims = 0; for (u8 i = 0; i < 12; ++i) { tt_clean_all() ; sims += mcts_benchmark(10 * 1000); } fprintf(stderr, "%u\n", sims / 120); return EXIT_SUCCESS; } } for (int i = 1; i < argc; ++i) { if ((strcmp(argv[i], "-m") == 0 || strcmp(argv[i], "--mode") == 0) && i < argc - 1) { args_understood += 2; if (strcmp(argv[i + 1], "text") == 0) { use_gtp = false; if (!flog_dest_set) { flog_config_destinations(LOG_DEST_FILE); } pass_when_losing = false; } else if (strcmp(argv[i + 1], "gtp") == 0) { use_gtp = true; if (!flog_dest_set) { flog_config_destinations(LOG_DEST_STDF | LOG_DEST_FILE); } pass_when_losing = true; } else { fprintf(stderr, "illegal format for mode\n"); exit(EXIT_FAILURE); } ++i; continue; } if ((strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--color") == 0) && i < argc - 1) { args_understood += 2; if (argv[i + 1][0] == 'b' || argv[i + 1][0] == 'B') { human_player_color = true; } else if (argv[i + 1][0] == 'w' || argv[i + 1][0] == 'W') { human_player_color = false; } else { fprintf(stderr, "illegal player color format\n"); exit(EXIT_FAILURE); } ++i; color_set = true; continue; } if (strcmp(argv[i], "--save_all") == 0) { args_understood += 1; save_all_games_to_file = true; continue; } if (strcmp(argv[i], "--think_in_opt_time") == 0) { args_understood += 1; think_in_opt_turn = true; time_related_set = true; continue; } if (strcmp(argv[i], "-t") == 0 || strcmp(argv[i], "--time") == 0) { args_understood += 2; 
time_system tmp; if (!str_to_time_system(&tmp, argv[i + 1])) { fprintf(stderr, "illegal time system format\n"); exit(EXIT_FAILURE); } set_time_system(&current_clock_black, tmp.main_time, tmp.byo_yomi_time, tmp.byo_yomi_stones, tmp.byo_yomi_periods); set_time_system(&current_clock_white, tmp.main_time, tmp.byo_yomi_time, tmp.byo_yomi_stones, tmp.byo_yomi_periods); char * s1 = alloc(); char * s2 = alloc(); time_system_to_str(s1, &current_clock_black); snprintf(s2, MAX_PAGE_SIZ, "Clock set to %s for both players.\n", s1); fprintf(stderr, "%s", s2); release(s2); release(s1); ++i; time_related_set = true; continue; } if (strcmp(argv[i], "--disable_gtp_time_control") == 0) { args_understood += 1; time_system_overriden = true; continue; } if (strcmp(argv[i], "--resign_on_timeout") == 0) { args_understood += 1; resign_on_timeout = true; time_related_set = true; continue; } if (strcmp(argv[i], "--playouts") == 0 && i < argc - 1) { args_understood += 2; d32 v; if (!parse_int(&v, argv[i + 1])) { fprintf(stderr, "format error in number of playouts\n"); exit(EXIT_FAILURE); } if (v < 1) { fprintf(stderr, "invalid number of playouts\n"); exit(EXIT_FAILURE); } limit_by_playouts = v; ++i; continue; } if (strcmp(argv[i], "--disable_opening_books") == 0) { args_understood += 1; opening_books_enabled = false; set_use_of_opening_book(false); continue; } if (strcmp(argv[i], "--sentinel") == 0 && i < argc - 1) { args_understood += 2; u32 len = strlen(argv[i + 1]) + 1; sentinel_file = malloc(len); memcpy(sentinel_file, argv[i + 1], len); ++i; continue; } } for (int i = 1; i < argc - 1; ++i) { if (strcmp(argv[i], "--losing") == 0) { args_understood += 2; if (strcmp(argv[i + 1], "pass") == 0) { pass_when_losing = true; } else if (strcmp(argv[i + 1], "resign") == 0) { pass_when_losing = false; } else { fprintf(stderr, "illegal format for --losing argument\n"); exit(EXIT_FAILURE); } ++i; continue; } } if (args_understood != argc - 1) { fprintf(stderr, "Unknown argument supplied; start with 
--help flag for usage instructions.\n"); return EXIT_FAILURE; } /* Errors for runtime options */ if (think_in_opt_turn && !use_gtp) { fprintf(stderr, "--think_in_opt_time flag set outside of GTP mode\n"); exit(EXIT_FAILURE); } if (use_gtp && color_set) { fprintf(stderr, "--color option set outside of text mode\n"); exit(EXIT_FAILURE); } if (time_related_set && limit_by_playouts > 0) { fprintf(stderr, "--playouts option set as well as time settings\n"); exit(EXIT_FAILURE); } /* Warnings for compile time options */ #if !MATILDA_RELEASE_MODE flog_warn("init", "running on debug mode"); #endif if (limit_by_playouts) { flog_warn("init", "MCTS using a constant number of simulations per turn"); } startup(opening_books_enabled, desired_num_threads); if (use_gtp) { main_gtp(think_in_opt_turn); } else { main_text(human_player_color); } return EXIT_SUCCESS; }
gsrb.ca.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ //#define GSRB_STRIDE2 //#define GSRB_FP //------------------------------------------------------------------------------------------------------------------------------ // This implements a communication avoiding (aggregation) smoother // It assumes... // in-place updates (no ping pong) // stencil radius==1 //------------------------------------------------------------------------------------------------------------------------------ void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){ int box,s; int ghosts = level->box_ghosts; int communicationAvoiding = ghosts > stencil_get_radius(); if(stencil_get_radius()>1){fprintf(stderr,"CA GSRB requires a stencil radius of 1\n");exit(0);} // if communication-avoiding, need updated RHS for stencils in ghost zones if(communicationAvoiding)exchange_boundary(level,rhs_id,STENCIL_SHAPE_BOX); for(s=0;s<2*NUM_SMOOTHS;s+=ghosts){ // there are two sweeps per GSRB smooth exchange_boundary(level,phi_id,communicationAvoiding ? STENCIL_SHAPE_BOX: stencil_get_shape()); apply_BCs(level,phi_id,communicationAvoiding ? STENCIL_SHAPE_BOX: stencil_get_shape()); // now do ghosts communication-avoiding smooths on each box... uint64_t _timeStart = CycleTime(); for(box=0;box<level->num_my_boxes;box++){ int i,j,k,ss; int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k)&1; // is element 000 red or black ??? 
(should only be an issue if box dimension is odd) const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int dim = level->my_boxes[box].dim; const double h2inv = 1.0/(level->h*level->h); const double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point double * __restrict__ phi_new = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride); const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride); const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride); const double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain const double * __restrict__ RedBlack[2] = {level->RedBlack_FP[0] + ghosts*(1+jStride), level->RedBlack_FP[1] + ghosts*(1+jStride)}; int ghostsToOperateOn=ghosts-1; for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){ #if defined(GSRB_FP) #warning GSRB using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization... 
#pragma omp parallel for private(i,j,k) collapse(2) for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){ for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){ for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){ int EvenOdd = (k^ss^color000)&1; int ij = i + j*jStride; int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(phi); double lambda = Dinv_ijk(); phi_new[ijk] = phi[ijk] + RedBlack[EvenOdd][ij]*lambda*(rhs[ijk]-Ax); // compiler seems to get confused unless there are disjoint read/write pointers }}} #elif defined(GSRB_STRIDE2) #warning GSRB using stride-2 accesses to minimie the number of flop's #pragma omp parallel for private(i,j,k) collapse(2) for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){ for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){ for(i=((j^k^ss^color000)&1)+1-ghosts;i<dim+ghostsToOperateOn;i+=2){ // stride-2 GSRB int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(phi); double lambda = Dinv_ijk(); phi_new[ijk] = phi[ijk] + lambda*(rhs[ijk]-Ax); }}} #else #warning GSRB using if-then-else on loop indices for Red-Black because its easy to read... #pragma omp parallel for private(i,j,k) collapse(2) for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){ for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){ for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){ if((i^j^k^ss^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0 int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(phi); double lambda = Dinv_ijk(); phi_new[ijk] = phi[ijk] + lambda*(rhs[ijk]-Ax); }}}} #endif } // ss-loop } // boxes level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart); } // s-loop } //------------------------------------------------------------------------------------------------------------------------------
GB_binop__isgt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isgt_int32) // A.*B function (eWiseMult): GB (_AemultB_08__isgt_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isgt_int32) // A.*B function (eWiseMult): GB (_AemultB_04__isgt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int32) // A*D function (colscale): GB (_AxD__isgt_int32) // D*A function (rowscale): GB (_DxB__isgt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isgt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isgt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int32) // C=scalar+B GB (_bind1st__isgt_int32) // C=scalar+B' GB (_bind1st_tran__isgt_int32) // C=A+scalar GB (_bind2nd__isgt_int32) // C=A'+scalar GB (_bind2nd_tran__isgt_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isgt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t 
*restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isgt_int32) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isgt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
aix_ssha_fmt_plug.c
/* AIX ssha cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com> and magnum. * * Thanks to atom (of hashcat project) and philsmd for discovering and * publishing the details of various AIX hashing algorithms. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * magnum, and * it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_aixssha1; extern struct fmt_main fmt_aixssha256; extern struct fmt_main fmt_aixssha512; #elif FMT_REGISTERS_H john_register_one(&fmt_aixssha1); john_register_one(&fmt_aixssha256); john_register_one(&fmt_aixssha512); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // Tuned on i7 w/HT for SHA-256 #endif #endif #include "pbkdf2_hmac_sha1.h" #include "pbkdf2_hmac_sha256.h" #include "pbkdf2_hmac_sha512.h" #include "memdbg.h" #define FORMAT_LABEL_SHA1 "aix-ssha1" #define FORMAT_LABEL_SHA256 "aix-ssha256" #define FORMAT_LABEL_SHA512 "aix-ssha512" #define FORMAT_NAME_SHA1 "AIX LPA {ssha1}" #define FORMAT_NAME_SHA256 "AIX LPA {ssha256}" #define FORMAT_NAME_SHA512 "AIX LPA {ssha512}" #define FORMAT_TAG1 "{ssha1}" #define FORMAT_TAG256 "{ssha256}" #define FORMAT_TAG512 "{ssha512}" #define FORMAT_TAG1_LEN (sizeof(FORMAT_TAG1)-1) #define FORMAT_TAG256_LEN (sizeof(FORMAT_TAG256)-1) #define FORMAT_TAG512_LEN (sizeof(FORMAT_TAG512)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME_SHA1 "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME_SHA1 "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #ifdef SIMD_COEF_32 #define ALGORITHM_NAME_SHA256 "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #else #define 
ALGORITHM_NAME_SHA256 "PBKDF2-SHA256 32/" ARCH_BITS_STR #endif #ifdef SIMD_COEF_64 #define ALGORITHM_NAME_SHA512 "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME_SHA512 "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 /* actual max in AIX is 255 */ #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define CMP_SIZE BINARY_SIZE - 2 #define LARGEST_BINARY_SIZE 64 #define MAX_SALT_SIZE 24 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests aixssha_tests1[] = { {"{ssha1}06$T6numGi8BRLzTYnF$AdXq1t6baevg9/cu5QBBk8Xg.se", "whatdoyouwantfornothing$$$$$$"}, {"{ssha1}06$6cZ2YrFYwVQPAVNb$1agAljwERjlin9RxFxzKl.E0.sJ", "gentoo=>meh"}, /* Full 125 byte PW (longest JtR will handle). generated by pass_gen.pl */ {"{ssha1}06$uOYCzfO5dt0EdnwG$CK81ljQknzEAcfwg97cocEwz.gv", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static struct fmt_tests aixssha_tests256[] = { {"{ssha256}06$YPhynOx/iJQaJOeV$EXQbOSYZftEo3k01uoanAbA7jEKZRUU9LCCs/tyU.wG", "verylongbutnotverystrongpassword"}, {"{ssha256}06$5lsi4pETf/0p/12k$xACBftDMh30RqgrM5Sppl.Txgho41u0oPoD21E1b.QT", "I<3JtR"}, /* Full 125 byte PW (longest JtR will handle). 
generated by pass_gen.pl */ {"{ssha256}06$qcXPTOQzDAqZuiHc$pS/1HC4uI5jIERNerX8.Zd0G/gDepZuqR7S5WHEn.AW", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static struct fmt_tests aixssha_tests512[] = { {"{ssha512}06$y2/.O4drNJd3ecgJ$DhNk3sS28lkIo7XZaXWSkFOIdP2Zsd9DIKdYDSuSU5tsnl29Q7xTc3f64eAGMpcMJCVp/SXZ4Xgx3jlHVIOr..", "solarisalwaysbusyitseems"}, {"{ssha512}06$Dz/dDr1qa8JJm0UB$DFNu2y8US18fW37ht8WRiwhSeOqAMJTJ6mLDW03D/SeQpdI50GJMYb1fBog5/ZU3oM9qsSr9w6u22.OjjufV..", "idontbelievethatyourpasswordislongerthanthisone"}, /* hash posted on john-users */ {"{ssha512}06$................$0egLaF88SUk6GAFIMN/vTwa/IYB.KlubYmjiaWvmQ975vHvgC3rf0I6ZYzgyUiQftS8qs7ULLQpRLrA3LA....", "44"}, {"{ssha512}06$aXayEJGxA02Bl4d2$TWfWx34oD.UjrS/Qtco6Ij2XPY1CPYJfdk3CcxEjnMZvQw2p5obHYH7SI2wxcJgaS9.S9Hz948R.GdGwsvR...", "test"}, /* http://www.ibmsystemsmag.com/aix/administrator/security/password_hash/?page=2 <== partially corrupted hash? */ {"{ssha512}06$otYx2eSXx.OkEY4F$No5ZvSfhYuB1MSkBhhcKJIjS0.q//awdkcZwF9/TXi3EnL6QeronmS0jCc3P2aEV9WLi5arzN1YjVwkx8bng..", "colorado"}, /* Full 125 byte PW (longest JtR will handle). 
generated by pass_gen.pl */ {"{ssha512}06$w6THk2lJbkqW0rXv$yH6VWp3X9ad2l8nhYi22lrrmWskXvEU.PONbWUAZHrjhgQjdU83jtRnYmpRZIJzTVC3RFcoqpbtd63n/UdlS..", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int iterations; int type; unsigned char salt[MAX_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static inline int valid_common(char *ciphertext, struct fmt_main *self, int b64len, char *sig, int siglen) { char *p = ciphertext; int len; if (!strncmp(p, sig, siglen)) p += siglen; else return 0; len = strspn(p, DIGITCHARS); /* iterations, exactly two digits */ if (len != 2 || atoi(p) > 31) /* actual range is 4..31 */ return 0; p += 2; if (*p++ != '$') return 0; len = strspn(p, BASE64_CRYPT); /* salt, 8..24 base64 chars */ if (len < 8 || len > MAX_SALT_SIZE) return 0; p += len; if (*p++ != '$') return 0; len = strspn(p, BASE64_CRYPT); /* hash */ if (len != b64len) return 0; if (p[len] != 0) /* nothing more allowed */ return 0; return 1; } static int valid_sha1(char *ciphertext, struct fmt_main *self) { return valid_common(ciphertext, self, 27, FORMAT_TAG1, FORMAT_TAG1_LEN); } static int valid_sha256(char *ciphertext, struct fmt_main *self) { return valid_common(ciphertext, self, 43, FORMAT_TAG256, FORMAT_TAG256_LEN); } static int valid_sha512(char *ciphertext, struct fmt_main *self) { return 
valid_common(ciphertext, self, 86, FORMAT_TAG512, FORMAT_TAG512_LEN); } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; static struct custom_salt cs; keeptr = ctcopy; memset(&cs, 0, sizeof(cs)); if ((strncmp(ciphertext, FORMAT_TAG1, FORMAT_TAG1_LEN) == 0)) { cs.type = 1; ctcopy += FORMAT_TAG1_LEN; } else if ((strncmp(ciphertext, FORMAT_TAG256, FORMAT_TAG256_LEN) == 0)) { cs.type = 256; ctcopy += FORMAT_TAG256_LEN; } else { cs.type = 512; ctcopy += FORMAT_TAG512_LEN; } p = strtokm(ctcopy, "$"); cs.iterations = 1 << atoi(p); p = strtokm(NULL, "$"); strncpy((char*)cs.salt, p, 17); MEM_FREE(keeptr); return (void *)&cs; } #define TO_BINARY(b1, b2, b3) { \ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out.c[b1] = value >> 16; \ out.c[b2] = value >> 8; \ out.c[b3] = value; } static void *get_binary(char *ciphertext) { static union { unsigned char c[LARGEST_BINARY_SIZE+3]; uint64_t dummy; } out; uint32_t value; char *pos = strrchr(ciphertext, '$') + 1; int len = strlen(pos); int i; memset(&out, 0, sizeof(out)); for (i = 0; i < len/4*3; i += 3) TO_BINARY(i, i + 1, i + 2); if (len % 3 == 1) { value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6); out.c[i] = value; } else if (len % 3 == 2) { /* sha-1, sha-256 */ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12); out.c[i++] = value >> 8; out.c[i++] = value; } return (void *)out.c; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int inc=1, index = 0; switch(cur_salt->type) { case 1: #ifdef SSE_GROUP_SZ_SHA1 
inc = SSE_GROUP_SZ_SHA1; #endif break; case 256: #ifdef SSE_GROUP_SZ_SHA256 inc = SSE_GROUP_SZ_SHA256; #endif break; default: #ifdef SSE_GROUP_SZ_SHA512 inc = SSE_GROUP_SZ_SHA512; #endif break; } #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += inc) { int j = index; while (j < index + inc) { if (cur_salt->type == 1) { #ifdef SSE_GROUP_SZ_SHA1 int lens[SSE_GROUP_SZ_SHA1], i; unsigned char *pin[SSE_GROUP_SZ_SHA1]; union { uint32_t *pout[SSE_GROUP_SZ_SHA1]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)(saved_key[j]); x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha1((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } else if (cur_salt->type == 256) { #ifdef SSE_GROUP_SZ_SHA256 int lens[SSE_GROUP_SZ_SHA256], i; unsigned char *pin[SSE_GROUP_SZ_SHA256]; union { uint32_t *pout[SSE_GROUP_SZ_SHA256]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)saved_key[j]; x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha256((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } else { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; union { uint32_t *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = 
strlen(saved_key[j]); pin[i] = (unsigned char*)saved_key[j]; x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha512((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; //dump_stuff_msg("\nbinary ", binary, CMP_SIZE); for (; index < count; index++) { //dump_stuff_msg("crypt_out", crypt_out[index], CMP_SIZE); if (!memcmp(binary, crypt_out[index], CMP_SIZE-2)) return 1; } return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], CMP_SIZE-2); } static int cmp_exact(char *source, int index) { return 1; } static void aixssha_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } /* report iteration count as tunable cost value */ static unsigned int aixssha_iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_aixssha1 = { { FORMAT_LABEL_SHA1, FORMAT_NAME_SHA1, ALGORITHM_NAME_SHA1, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 SSE_GROUP_SZ_SHA1, SSE_GROUP_SZ_SHA1, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG1 }, aixssha_tests1 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha1, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, 
fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_aixssha256 = { { FORMAT_LABEL_SHA256, FORMAT_NAME_SHA256, ALGORITHM_NAME_SHA256, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 SSE_GROUP_SZ_SHA256, SSE_GROUP_SZ_SHA256, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG256 }, aixssha_tests256 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha256, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_aixssha512 = { { FORMAT_LABEL_SHA512, FORMAT_NAME_SHA512, ALGORITHM_NAME_SHA512, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_64 SSE_GROUP_SZ_SHA512, SSE_GROUP_SZ_SHA512, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG512 }, aixssha_tests512 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha512, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, 
fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
/* ==================== main.c ==================== */
//=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // DEFINE / INCLUDE //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "AVI/avilib.h" #include "AVI/avimod.h" #include <omp.h> //#include "define.c" #include "kernel.c" //===============================================================================================================================================================================================================200 // WRITE DATA FUNCTION //===============================================================================================================================================================================================================200 void write_data( char* filename, int frameNo, int frames_processed, int endoPoints, int* input_a, int* input_b, int epiPoints, int* input_2a, int* input_2b){ //================================================================================80 // VARIABLES //================================================================================80 FILE* fid; int i,j; char c; //================================================================================80 // OPEN FILE FOR READING 
//================================================================================80 fid = fopen(filename, "w+"); if( fid == NULL ){ printf( "The file was not opened for writing\n" ); return; } //================================================================================80 // WRITE VALUES TO THE FILE //================================================================================80 fprintf(fid, "Total AVI Frames: %d\n", frameNo); fprintf(fid, "Frames Processed: %d\n", frames_processed); fprintf(fid, "endoPoints: %d\n", endoPoints); fprintf(fid, "epiPoints: %d", epiPoints); for(j=0; j<frames_processed;j++) { fprintf(fid, "\n---Frame %d---",j); fprintf(fid, "\n--endo--\n",j); for(i=0; i<endoPoints; i++){ fprintf(fid, "%d\t", input_a[j+i*frameNo]); } fprintf(fid, "\n"); for(i=0; i<endoPoints; i++){ // if(input_b[j*size+i] > 2000) input_b[j*size+i]=0; fprintf(fid, "%d\t", input_b[j+i*frameNo]); } fprintf(fid, "\n--epi--\n",j); for(i=0; i<epiPoints; i++){ //if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0; fprintf(fid, "%d\t", input_2a[j+i*frameNo]); } fprintf(fid, "\n"); for(i=0; i<epiPoints; i++){ //if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0; fprintf(fid, "%d\t", input_2b[j+i*frameNo]); } } // ================================================================================80 // CLOSE FILE // ================================================================================80 fclose(fid); } //=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== // MAIN FUNCTION 
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================

// Heart-wall tracking driver: reads an AVI echo video, sets up per-point
// working buffers, then runs the tracking kernel over the requested number of
// frames with OpenMP parallelism across the tracked points.
// Usage: heartwall <inputfile> <num of frames> <num of threads>
// NOTE(review): 'public'/'private' are valid C identifiers but would collide
// with keywords if this file were ever compiled as C++.
int main(int argc, char *argv []){

	//======================================================================================================================================================
	//	VARIABLES
	//======================================================================================================================================================

	// counters
	int i;
	int frames_processed;

	// parameters (public_struct/private_struct and fp come from kernel.c)
	public_struct public;
	private_struct private[ALL_POINTS];

	//======================================================================================================================================================
	// 	FRAMES
	//======================================================================================================================================================

	if(argc!=4){
		printf("ERROR: usage: heartwall <inputfile> <num of frames> <num of threads>\n");
		exit(1);
	}

	char* video_file_name;
	video_file_name = argv[1];

	avi_t* d_frames = (avi_t*)AVI_open_input_file(video_file_name, 1);	// added casting
	if (d_frames == NULL)  {
		AVI_print_error((char *) "Error with AVI_open_input_file");
		return -1;
	}

	public.d_frames = d_frames;
	public.frames = AVI_video_frames(public.d_frames);
	public.frame_rows = AVI_video_height(public.d_frames);
	public.frame_cols = AVI_video_width(public.d_frames);
	public.frame_elem = public.frame_rows * public.frame_cols;
	public.frame_mem = sizeof(fp) * public.frame_elem;

	//======================================================================================================================================================
	// 	CHECK INPUT ARGUMENTS
	//======================================================================================================================================================

	frames_processed = atoi(argv[2]);
	if(frames_processed<0 || frames_processed>public.frames){
		printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, public.frames);
		return 0;
	}

	int omp_num_threads;
	omp_num_threads = atoi(argv[3]);
	if (omp_num_threads <=0){
		printf ("num of threads must be a positive integer");
		return 0;
	}

	printf("num of threads: %d\n", omp_num_threads);

	//======================================================================================================================================================
	//	INPUTS
	//======================================================================================================================================================

	//====================================================================================================
	//	ENDO POINTS
	//====================================================================================================

	// Hard-coded initial coordinates of the 20 endocardium tracking points.
	public.endoPoints = ENDO_POINTS;
	public.d_endo_mem = sizeof(int) * public.endoPoints;

	public.d_endoRow = (int *)malloc(public.d_endo_mem);
	public.d_endoRow[ 0] = 369;
	public.d_endoRow[ 1] = 400;
	public.d_endoRow[ 2] = 429;
	public.d_endoRow[ 3] = 452;
	public.d_endoRow[ 4] = 476;
	public.d_endoRow[ 5] = 486;
	public.d_endoRow[ 6] = 479;
	public.d_endoRow[ 7] = 458;
	public.d_endoRow[ 8] = 433;
	public.d_endoRow[ 9] = 404;
	public.d_endoRow[10] = 374;
	public.d_endoRow[11] = 346;
	public.d_endoRow[12] = 318;
	public.d_endoRow[13] = 294;
	public.d_endoRow[14] = 277;
	public.d_endoRow[15] = 269;
	public.d_endoRow[16] = 275;
	public.d_endoRow[17] = 287;
	public.d_endoRow[18] = 311;
	public.d_endoRow[19] = 339;

	public.d_endoCol = (int *)malloc(public.d_endo_mem);
	public.d_endoCol[ 0] = 408;
	public.d_endoCol[ 1] = 406;
	public.d_endoCol[ 2] = 397;
	public.d_endoCol[ 3] = 383;
	public.d_endoCol[ 4] = 354;
	public.d_endoCol[ 5] = 322;
	public.d_endoCol[ 6] = 294;
	public.d_endoCol[ 7] = 270;
	public.d_endoCol[ 8] = 250;
	public.d_endoCol[ 9] = 237;
	public.d_endoCol[10] = 235;
	public.d_endoCol[11] = 241;
	public.d_endoCol[12] = 254;
	public.d_endoCol[13] = 273;
	public.d_endoCol[14] = 300;
	public.d_endoCol[15] = 328;
	public.d_endoCol[16] = 356;
	public.d_endoCol[17] = 383;
	public.d_endoCol[18] = 401;
	public.d_endoCol[19] = 411;

	// per-frame tracked coordinates, one column per point
	public.d_tEndoRowLoc = (int *)malloc(public.d_endo_mem * public.frames);
	public.d_tEndoColLoc = (int *)malloc(public.d_endo_mem * public.frames);

	//====================================================================================================
	//	EPI POINTS
	//====================================================================================================

	// Hard-coded initial coordinates of the 31 epicardium tracking points.
	public.epiPoints = EPI_POINTS;
	public.d_epi_mem = sizeof(int) * public.epiPoints;

	public.d_epiRow = (int *)malloc(public.d_epi_mem);
	public.d_epiRow[ 0] = 390;
	public.d_epiRow[ 1] = 419;
	public.d_epiRow[ 2] = 448;
	public.d_epiRow[ 3] = 474;
	public.d_epiRow[ 4] = 501;
	public.d_epiRow[ 5] = 519;
	public.d_epiRow[ 6] = 535;
	public.d_epiRow[ 7] = 542;
	public.d_epiRow[ 8] = 543;
	public.d_epiRow[ 9] = 538;
	public.d_epiRow[10] = 528;
	public.d_epiRow[11] = 511;
	public.d_epiRow[12] = 491;
	public.d_epiRow[13] = 466;
	public.d_epiRow[14] = 438;
	public.d_epiRow[15] = 406;
	public.d_epiRow[16] = 376;
	public.d_epiRow[17] = 347;
	public.d_epiRow[18] = 318;
	public.d_epiRow[19] = 291;
	public.d_epiRow[20] = 275;
	public.d_epiRow[21] = 259;
	public.d_epiRow[22] = 256;
	public.d_epiRow[23] = 252;
	public.d_epiRow[24] = 252;
	public.d_epiRow[25] = 257;
	public.d_epiRow[26] = 266;
	public.d_epiRow[27] = 283;
	public.d_epiRow[28] = 305;
	public.d_epiRow[29] = 331;
	public.d_epiRow[30] = 360;

	public.d_epiCol = (int *)malloc(public.d_epi_mem);
	public.d_epiCol[ 0] = 457;
	public.d_epiCol[ 1] = 454;
	public.d_epiCol[ 2] = 446;
	public.d_epiCol[ 3] = 431;
	public.d_epiCol[ 4] = 411;
	public.d_epiCol[ 5] = 388;
	public.d_epiCol[ 6] = 361;
	public.d_epiCol[ 7] = 331;
	public.d_epiCol[ 8] = 301;
	public.d_epiCol[ 9] = 273;
	public.d_epiCol[10] = 243;
	public.d_epiCol[11] = 218;
	public.d_epiCol[12] = 196;
	public.d_epiCol[13] = 178;
	public.d_epiCol[14] = 166;
	public.d_epiCol[15] = 157;
	public.d_epiCol[16] = 155;
	public.d_epiCol[17] = 165;
	public.d_epiCol[18] = 177;
	public.d_epiCol[19] = 197;
	public.d_epiCol[20] = 218;
	public.d_epiCol[21] = 248;
	public.d_epiCol[22] = 276;
	public.d_epiCol[23] = 304;
	public.d_epiCol[24] = 333;
	public.d_epiCol[25] = 361;
	public.d_epiCol[26] = 391;
	public.d_epiCol[27] = 415;
	public.d_epiCol[28] = 434;
	public.d_epiCol[29] = 448;
	public.d_epiCol[30] = 455;

	public.d_tEpiRowLoc = (int *)malloc(public.d_epi_mem * public.frames);
	public.d_tEpiColLoc = (int *)malloc(public.d_epi_mem * public.frames);

	//====================================================================================================
	//	ALL POINTS
	//====================================================================================================

	public.allPoints = ALL_POINTS;

	//======================================================================================================================================================
	// 	CONSTANTS
	//======================================================================================================================================================

	public.tSize = 25;		// template half-size
	public.sSize = 40;		// search-window half-size
	public.maxMove = 10;		// max point displacement per frame
	public.alpha = 0.87;		// template update blending factor

	//======================================================================================================================================================
	//	SUMS
	//======================================================================================================================================================

	for(i=0; i<public.allPoints; i++){
		// NOTE(review): "sizeof(fp) * 2*public.tSize+1" binds as
		// (sizeof(fp)*2*tSize)+1, i.e. one extra BYTE rather than one extra
		// ELEMENT — likely (sizeof(fp) * (2*public.tSize+1)) was intended;
		// confirm against kernel.c's usage before changing.
		private[i].in_partial_sum = (fp *)malloc(sizeof(fp) * 2*public.tSize+1);
		private[i].in_sqr_partial_sum = (fp *)malloc(sizeof(fp) * 2*public.tSize+1);
		private[i].par_max_val = (fp *)malloc(sizeof(fp) * (2*public.tSize+2*public.sSize+1));
		private[i].par_max_coo = (int *)malloc(sizeof(int) * (2*public.tSize+2*public.sSize+1));
	}

	//======================================================================================================================================================
	// 	INPUT 2 (SAMPLE AROUND POINT)
	//======================================================================================================================================================

	public.in2_rows = 2 * public.sSize + 1;
	public.in2_cols = 2 * public.sSize + 1;
	public.in2_elem = public.in2_rows * public.in2_cols;
	public.in2_mem = sizeof(fp) * public.in2_elem;

	for(i=0; i<public.allPoints; i++){
		private[i].d_in2 = (fp *)malloc(public.in2_mem);
		private[i].d_in2_sqr = (fp *)malloc(public.in2_mem);
	}

	//======================================================================================================================================================
	// 	INPUT (POINT TEMPLATE)
	//======================================================================================================================================================

	public.in_mod_rows = public.tSize+1+public.tSize;
	public.in_mod_cols = public.in_mod_rows;
	public.in_mod_elem = public.in_mod_rows * public.in_mod_cols;
	public.in_mod_mem = sizeof(fp) * public.in_mod_elem;

	for(i=0; i<public.allPoints; i++){
		private[i].d_in_mod = (fp *)malloc(public.in_mod_mem);
		private[i].d_in_sqr = (fp *)malloc(public.in_mod_mem);
	}

	//======================================================================================================================================================
	// 	ARRAY OF TEMPLATES FOR ALL POINTS
	//======================================================================================================================================================

	public.d_endoT = (fp *)malloc(public.in_mod_mem * public.endoPoints);
	public.d_epiT = (fp *)malloc(public.in_mod_mem * public.epiPoints);

	//======================================================================================================================================================
	// 	SETUP private POINTERS TO ROWS, COLS AND TEMPLATE
	//======================================================================================================================================================

	// endo points: 0..endoPoints-1
	for(i=0; i<public.endoPoints; i++){
		private[i].point_no = i;
		private[i].in_pointer = private[i].point_no * public.in_mod_elem;
		private[i].d_Row = public.d_endoRow;			// original row coordinates
		private[i].d_Col = public.d_endoCol;			// original col coordinates
		private[i].d_tRowLoc = public.d_tEndoRowLoc;		// updated row coordinates
		private[i].d_tColLoc = public.d_tEndoColLoc;		// updated row coordinates
		private[i].d_T = public.d_endoT;			// templates
	}

	// epi points: endoPoints..allPoints-1 (each iteration is independent)
	#pragma omp parallel for firstprivate(i )
	for(i=public.endoPoints; i<public.allPoints; i++){
		private[i].point_no = i-public.endoPoints;
		private[i].in_pointer = private[i].point_no * public.in_mod_elem;
		private[i].d_Row = public.d_epiRow;
		private[i].d_Col = public.d_epiCol;
		private[i].d_tRowLoc = public.d_tEpiRowLoc;
		private[i].d_tColLoc = public.d_tEpiColLoc;
		private[i].d_T = public.d_epiT;
	}

	//======================================================================================================================================================
	// 	CONVOLUTION
	//======================================================================================================================================================

	public.ioffset = 0;
	public.joffset = 0;
	public.conv_rows = public.in_mod_rows + public.in2_rows - 1;	// number of rows in I
	public.conv_cols = public.in_mod_cols + public.in2_cols - 1;	// number of columns in I
	public.conv_elem = public.conv_rows * public.conv_cols;		// number of elements
	public.conv_mem = sizeof(fp) * public.conv_elem;

	#pragma omp parallel for firstprivate(i )
	for(i=0; i<public.allPoints; i++){
		private[i].d_conv = (fp *)malloc(public.conv_mem);
	}

	//======================================================================================================================================================
	// 	CUMULATIVE SUM
	//======================================================================================================================================================

	//====================================================================================================
	// 	PAD ARRAY
	//====================================================================================================

	//====================================================================================================
	// 	VERTICAL CUMULATIVE SUM
	//====================================================================================================

	public.in2_pad_add_rows = public.in_mod_rows;
	public.in2_pad_add_cols = public.in_mod_cols;
	public.in2_pad_rows = public.in2_rows + 2*public.in2_pad_add_rows;
	public.in2_pad_cols = public.in2_cols + 2*public.in2_pad_add_cols;
	public.in2_pad_elem = public.in2_pad_rows * public.in2_pad_cols;
	public.in2_pad_mem = sizeof(fp) * public.in2_pad_elem;

	#pragma omp parallel for firstprivate(i )
	for(i=0; i<public.allPoints; i++){
		private[i].d_in2_pad = (fp *)malloc(public.in2_pad_mem);
	}

	//====================================================================================================
	// 	SELECTION, SELECTION 2, SUBTRACTION
	//====================================================================================================

	//====================================================================================================
	// 	HORIZONTAL CUMULATIVE SUM
	//====================================================================================================

	// 1-based row/column windows used by the kernel's selection steps
	public.in2_pad_cumv_sel_rowlow = 1 + public.in_mod_rows;	// (1 to n+1)
	public.in2_pad_cumv_sel_rowhig = public.in2_pad_rows - 1;
	public.in2_pad_cumv_sel_collow = 1;
	public.in2_pad_cumv_sel_colhig = public.in2_pad_cols;
	public.in2_pad_cumv_sel2_rowlow = 1;
	public.in2_pad_cumv_sel2_rowhig = public.in2_pad_rows - public.in_mod_rows - 1;
	public.in2_pad_cumv_sel2_collow = 1;
	public.in2_pad_cumv_sel2_colhig = public.in2_pad_cols;
	public.in2_sub_rows = public.in2_pad_cumv_sel_rowhig - public.in2_pad_cumv_sel_rowlow + 1;
	public.in2_sub_cols = public.in2_pad_cumv_sel_colhig - public.in2_pad_cumv_sel_collow + 1;
	public.in2_sub_elem = public.in2_sub_rows * public.in2_sub_cols;
	public.in2_sub_mem = sizeof(fp) * public.in2_sub_elem;

	for(i=0; i<public.allPoints; i++){
		private[i].d_in2_sub = (fp *)malloc(public.in2_sub_mem);
	}

	//====================================================================================================
	// 	SELECTION, SELECTION 2, SUBTRACTION, SQUARE, NUMERATOR
	//====================================================================================================

	public.in2_sub_cumh_sel_rowlow = 1;
	public.in2_sub_cumh_sel_rowhig = public.in2_sub_rows;
	public.in2_sub_cumh_sel_collow = 1 + public.in_mod_cols;
	public.in2_sub_cumh_sel_colhig = public.in2_sub_cols - 1;
	public.in2_sub_cumh_sel2_rowlow = 1;
	public.in2_sub_cumh_sel2_rowhig = public.in2_sub_rows;
	public.in2_sub_cumh_sel2_collow = 1;
	public.in2_sub_cumh_sel2_colhig = public.in2_sub_cols - public.in_mod_cols - 1;
	public.in2_sub2_sqr_rows = public.in2_sub_cumh_sel_rowhig - public.in2_sub_cumh_sel_rowlow + 1;
	public.in2_sub2_sqr_cols = public.in2_sub_cumh_sel_colhig - public.in2_sub_cumh_sel_collow + 1;
	public.in2_sub2_sqr_elem = public.in2_sub2_sqr_rows * public.in2_sub2_sqr_cols;
	public.in2_sub2_sqr_mem = sizeof(fp) * public.in2_sub2_sqr_elem;

	for(i=0; i<public.allPoints; i++){
		private[i].d_in2_sub2_sqr = (fp *)malloc(public.in2_sub2_sqr_mem);
	}

	//======================================================================================================================================================
	// 	CUMULATIVE SUM 2
	//======================================================================================================================================================

	//====================================================================================================
	// 	PAD ARRAY
	//====================================================================================================

	//====================================================================================================
	// 	VERTICAL CUMULATIVE SUM
	//====================================================================================================

	//====================================================================================================
	// 	SELECTION, SELECTION 2, SUBTRACTION
	//====================================================================================================

	//====================================================================================================
	// 	HORIZONTAL CUMULATIVE SUM
	//====================================================================================================

	//====================================================================================================
	// 	SELECTION, SELECTION 2, SUBTRACTION, DIFFERENTIAL LOCAL SUM, DENOMINATOR A, DENOMINATOR, CORRELATION
	//====================================================================================================

	//======================================================================================================================================================
	//	TEMPLATE MASK CREATE
	//======================================================================================================================================================

	public.tMask_rows = public.in_mod_rows + (public.sSize+1+public.sSize) - 1;
	public.tMask_cols = public.tMask_rows;
	public.tMask_elem = public.tMask_rows * public.tMask_cols;
	public.tMask_mem = sizeof(fp) * public.tMask_elem;

	#pragma omp parallel for firstprivate(i )
	for(i=0; i<public.allPoints; i++){
		private[i].d_tMask = (fp *)malloc(public.tMask_mem);
	}

	//======================================================================================================================================================
	//	POINT MASK INITIALIZE
	//======================================================================================================================================================

	public.mask_rows = public.maxMove;
	public.mask_cols = public.mask_rows;
	public.mask_elem = public.mask_rows * public.mask_cols;
	public.mask_mem = sizeof(fp) * public.mask_elem;

	//======================================================================================================================================================
	//	MASK CONVOLUTION
	//======================================================================================================================================================

	public.mask_conv_rows = public.tMask_rows;				// number of rows in I
	public.mask_conv_cols = public.tMask_cols;				// number of columns in I
	public.mask_conv_elem = public.mask_conv_rows * public.mask_conv_cols;	// number of elements
	public.mask_conv_mem = sizeof(fp) * public.mask_conv_elem;
	// round the half-kernel offsets up when the kernel dimension is even
	public.mask_conv_ioffset = (public.mask_rows-1)/2;
	if((public.mask_rows-1) % 2 > 0.5){
		public.mask_conv_ioffset = public.mask_conv_ioffset + 1;
	}
	public.mask_conv_joffset = (public.mask_cols-1)/2;
	if((public.mask_cols-1) % 2 > 0.5){
		public.mask_conv_joffset = public.mask_conv_joffset + 1;
	}

	#pragma omp parallel for firstprivate(i )
	for(i=0; i<public.allPoints; i++){
		private[i].d_mask_conv = (fp *)malloc(public.mask_conv_mem);
	}

	//======================================================================================================================================================
	//	PRINT FRAME PROGRESS START
	//======================================================================================================================================================

	printf("frame progress: ");
	fflush(NULL);

	//======================================================================================================================================================
	//	KERNEL
	//======================================================================================================================================================

	for(public.frame_no=0; public.frame_no<frames_processed; public.frame_no++){

		//====================================================================================================
		//	GETTING FRAME
		//====================================================================================================

		// Extract a cropped version of the first frame from the video file
		public.d_frame = get_frame(	public.d_frames,	// pointer to video file
						public.frame_no,	// number of frame that needs to be returned
						0,			// cropped?
						0,			// scaled?
						1);			// converted

		//====================================================================================================
		//	PROCESSING
		//====================================================================================================

		// one kernel invocation per tracked point, distributed over threads
		omp_set_num_threads(omp_num_threads);
		#pragma omp parallel for
		for(i=0; i<public.allPoints; i++){
			kernel(	public,
				private[i]);
		}

		//====================================================================================================
		//	FREE MEMORY FOR FRAME
		//====================================================================================================

		// free frame after each loop iteration, since AVI library allocates memory for every frame fetched
		free(public.d_frame);

		//====================================================================================================
		//	PRINT FRAME PROGRESS
		//====================================================================================================

		printf("%d ", public.frame_no);
		fflush(NULL);

	}

	//======================================================================================================================================================
	//	PRINT FRAME PROGRESS END
	//======================================================================================================================================================

	printf("\n");
	fflush(NULL);

	//======================================================================================================================================================
	//	DEALLOCATION
	//======================================================================================================================================================

	//==================================================50
	//	DUMP DATA TO FILE
	//==================================================50
#ifdef OUTPUT
	write_data(	"result.txt",
			public.frames,
			frames_processed,
			public.endoPoints,
			public.d_tEndoRowLoc,
			public.d_tEndoColLoc,
			public.epiPoints,
			public.d_tEpiRowLoc,
			public.d_tEpiColLoc);
#endif

	//====================================================================================================
	//	COMMON
	//====================================================================================================

	free(public.d_endoRow);
	free(public.d_endoCol);
	free(public.d_tEndoRowLoc);
	free(public.d_tEndoColLoc);
	free(public.d_endoT);

	free(public.d_epiRow);
	free(public.d_epiCol);
	free(public.d_tEpiRowLoc);
	free(public.d_tEpiColLoc);
	free(public.d_epiT);

	//====================================================================================================
	//	POINTERS
	//====================================================================================================

	#pragma omp parallel for firstprivate(i )
	for(i=0; i<public.allPoints; i++){
		free(private[i].in_partial_sum);
		free(private[i].in_sqr_partial_sum);
		free(private[i].par_max_val);
		free(private[i].par_max_coo);
		free(private[i].d_in2);
		free(private[i].d_in2_sqr);
		free(private[i].d_in_mod);
		free(private[i].d_in_sqr);
		free(private[i].d_conv);
		free(private[i].d_in2_pad);
		free(private[i].d_in2_sub);
		free(private[i].d_in2_sub2_sqr);
		free(private[i].d_tMask);
		free(private[i].d_mask_conv);
	}

}

//========================================================================================================================================================================================================
//========================================================================================================================================================================================================
//	END OF FILE
//========================================================================================================================================================================================================
//========================================================================================================================================================================================================
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set all given channels is adjusted in the same way using the % mean average of those channels. 
% */ MagickExport MagickBooleanType AutoGammaImage(Image *image) { return(AutoGammaImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoGammaImageChannel(Image *image, const ChannelType channel) { MagickStatusType status; double mean,sans,gamma,logmean; logmean=log(0.5); if ((channel & SyncChannels) != 0 ) { /* Apply gamma correction equally accross all given channels */ (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception); gamma=log(mean*QuantumScale)/logmean; return LevelImageChannel(image, channel, 0.0, (double)QuantumRange, gamma); } /* auto-gamma each channel separateally */ status = MagickTrue; if ((channel & RedChannel) != 0) { (void) GetImageChannelMean(image,RedChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, RedChannel, 0.0, (double)QuantumRange, gamma); } if ((channel & GreenChannel) != 0) { (void) GetImageChannelMean(image,GreenChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, GreenChannel, 0.0, (double)QuantumRange, gamma); } if ((channel & BlueChannel) != 0) { (void) GetImageChannelMean(image,BlueChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, BlueChannel, 0.0, (double)QuantumRange, gamma); } if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) { (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, OpacityChannel, 0.0, (double)QuantumRange, gamma); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { (void) GetImageChannelMean(image,IndexChannel,&mean,&sans, &image->exception); gamma=log(mean*QuantumScale)/logmean; status = status && LevelImageChannel(image, IndexChannel, 0.0, (double)QuantumRange, gamma); } return(status != 0 ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image) % MagickBooleanType AutoLevelImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. If the special 'SyncChannels' % flag is set the min/max/mean value of all given channels is used for % all given channels, to all channels in the same way. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image) { return(AutoLevelImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType AutoLevelImageChannel(Image *image, const ChannelType channel) { /* This is simply a convenience function around a Min/Max Histogram Stretch */ return MinMaxStretchImage(image, channel, 0.0, 0.0); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. 
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
/* Fixed misspelled tag ("BrightnessContast"); macro is the progress label. */
#define BrightnessContrastImageTag  "BrightnessContrast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Convert the percent arguments into the slope and intercept of a linear
    (degree-1) polynomial, then delegate the per-pixel work to
    FunctionImageChannel().  contrast maps through tan() so that 0% yields
    slope 1.0 (identity); negative slopes are clamped to 0.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%    <ColorCorrection id="cc03345">
%          <SOPNode>
%               <Slope> 0.9 1.2 0.5 </Slope>
%               <Offset> 0.4 -0.5 0.6 </Offset>
%               <Power> 1.0 0.8 1.5 </Power>
%          </SOPNode>
%          <SATNode>
%               <Saturation> 0.85 </Saturation>
%          </SATNode>
%    </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection)
{
#define ColorDecisionListCorrectImageTag  "ColorDecisionList/Image"

  /* One ASC-CDL slope/offset/power triple for a single channel. */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  /* Full correction: per-channel SOP values plus a global saturation. */
  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MaxTextExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelPacket
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  /* Parse the CCC XML; bail out (MagickFalse) on any malformed input. */
  ccc=NewXMLTree((const char *) color_correction_collection,&image->exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /* Defaults: identity SOP (slope 1, offset 0, power 1), saturation 0. */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* Each SOP element holds up to three numbers: red, green, blue. */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetMagickToken(p,&p,token);
            if (*token == ',')
              GetMagickToken(p,&p,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetMagickToken(p,&p,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /*
    Precompute a quantum lookup table: out = (slope*in + offset)^power,
    one entry per map level, so the per-pixel loop below is table-driven.
  */
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.
        Saturation blends each channel toward/away from the Rec.709 luma.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        luma=0.2126*image->colormap[i].red+0.7152*image->colormap[i].green+
          0.0722*image->colormap[i].blue;
        image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
          cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma);
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma);
        image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
          cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma);
      }
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* A failed row in another thread cancels remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.2126*GetPixelRed(q)+0.7152*GetPixelGreen(q)+
        0.0722*GetPixelBlue(q);
      SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
      SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
      SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

/* Serialize progress updates across the OpenMP team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image, by using it as an
%  index to lookup a replacement color value in a Color Look UP Table in the
%  form of an image.  The values are extracted along a diagonal of the CLUT
%  image so either a horizontal or vertical gradient image can be used.
%
%  Typically this is used to either re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel it is assumed that that image is a simple
%  gray-scale image that will effect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or
%  a histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image)
%      MagickBooleanType ClutImageChannel(Image *image,
%        const ChannelType channel,Image *clut_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  return(ClutImageChannel(image,DefaultChannels,clut_image));
}

MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickSignature);
  /* CLUT replacement needs per-pixel storage, not a colormap. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
    'adjust' shifts sampling for non-integer interpolation so the map spans
    the CLUT image's diagonal inclusively.
  */
  status=MagickTrue;
  progress=0;
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  exception=(&image->exception);
  clut_view=AcquireCacheView(clut_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    /* Sample the CLUT along its diagonal into a map-level lookup table. */
    GetMagickPixelPacket(clut_image,clut_map+i);
    (void) InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,QuantumScale*i*(clut_image->columns-adjust),
      QuantumScale*i*(clut_image->rows-adjust),clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* 'pixel' keeps the pre-replacement value for the alpha cases below. */
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Three cases: CLUT has no matte (treat CLUT intensity as alpha),
            image has no matte (index by source intensity), or both have
            matte (direct opacity-to-opacity lookup).
          */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          GetPixelIndex(indexes+x))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

/* Serialize progress updates across the OpenMP team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ClutImageChannel)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  /* If the CLUT supplied alpha values, make sure the image's matte is on. */
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to a MagickTrue to increase the
%  image contrast otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark color become darker, light color become lighter.
    Works on the HSB brightness with a sinusoidal push away from mid-gray;
    'sign' (+1/-1) selects sharpen vs. dull.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

/* Serialize progress updates across the OpenMP team. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t S t r e t c h I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that attempts
%  to improve the contrast in an image by `stretching' the range of intensity
%  values it contains to span a desired range of values.  It differs from the
%  more sophisticated histogram equalization in that it can only apply a
%  linear scaling function to the image pixel values.  As a result the
%  `enhancement' is less harsh.
% % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. */ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, *stretch_map, white; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (MagickPixelPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ status=MagickTrue; exception=(&image->exception); (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=PixelIntensityToQuantum(p); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex( indexes+x))].index++; p++; } } /* 
Find the histogram boundaries by locating the black/white levels. */ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= 
(ssize_t) MaxMap; i++) { intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=0.0; else if (i > (ssize_t) white.red) stretch_map[i].red=(MagickRealType) QuantumRange; else if (black.red != white.red) stretch_map[i].red=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0.0; else if (i > (ssize_t) white.green) stretch_map[i].green=(MagickRealType) QuantumRange; else if (black.green != white.green) stretch_map[i].green=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.green)/(white.green- black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0.0; else if (i > (ssize_t) white.blue) stretch_map[i].blue=(MagickRealType) QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.blue)/(white.blue- black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0.0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=(MagickRealType) QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=(MagickRealType) ScaleMapToQuantum( 
(MagickRealType) (MaxMap*(i-black.opacity)/(white.opacity- black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0.0; else if (i > (ssize_t) white.index) stretch_map[i].index=(MagickRealType) QuantumRange; else if (black.index != white.index) stretch_map[i].index=(MagickRealType) ScaleMapToQuantum( (MagickRealType) (MaxMap*(i-black.index)/(white.index- black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=ClampToQuantum(stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity); } } } /* Stretch image. 
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red)); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green)); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue)); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,ClampToQuantum(stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index)); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastStretchImageChannel) #endif proceed=SetImageProgress(image,ContrastStretchImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(MagickPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define Enhance(weight) \ mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \ distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \ distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \ mean)*distance*distance; \ mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \ distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) \ pixel.green; \ distance_squared+=4.0*distance*distance; \ mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \ distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) \ pixel.blue; \ distance_squared+=QuantumScale*(3.0*((MagickRealType) \ QuantumRange+1.0)-1.0-mean)*distance*distance; \ mean=((MagickRealType) r->opacity+pixel.opacity)/2; \ distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \ distance_squared+=QuantumScale*(3.0*((MagickRealType) \ QuantumRange+1.0)-1.0-mean)*distance*distance; \ if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \ QuantumRange/25.0f)) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType 
progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) ResetMagickMemory(&zero,0,sizeof(zero)); image_view=AcquireCacheView(image); enhance_view=AcquireCacheView(enhance_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket aggregate; MagickRealType distance, distance_squared, mean, total_weight; PixelPacket pixel; register const PixelPacket *restrict r; /* Compute weighted average of target pixel color components. 
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0); r=p+(image->columns+4); Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0); r=p+2*(image->columns+4); Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0); r=p+3*(image->columns+4); Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0); r=p+4*(image->columns+4); Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0); SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/ total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EnhanceImage) #endif proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image) % MagickBooleanType EqualizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. 
% */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *equalize_map, *histogram, intensity, *map, white; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (MagickPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (MagickPixelPacket *) NULL) equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Integrate the histogram to get the equalization map. 
*/ (void) ResetMagickMemory(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=(MagickRealType) ScaleMapToQuantum( (MagickRealType) ((MaxMap*(map[i].opacity-black.opacity))/ (white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=(MagickRealType) ScaleMapToQuantum((MagickRealType) ((MaxMap*(map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if 
(image->storage_class == PseudoClass) { /* Equalize colormap. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=ClampToQuantum(equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity); } } /* Equalize image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red)); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green)); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,ClampToQuantum(equalize_map[ 
ScaleQuantumToMap(GetPixelBlue(q))].blue)); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity)); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,ClampToQuantum(equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImageChannel) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(MagickPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. 
% % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ MagickExport MagickBooleanType GammaImage(Image *image,const char *level) { GeometryInfo geometry_info; MagickPixelPacket gamma; MagickStatusType flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (level == (char *) NULL) return(MagickFalse); flags=ParseGeometry(level,&geometry_info); gamma.red=geometry_info.rho; gamma.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) gamma.green=gamma.red; gamma.blue=geometry_info.xi; if ((flags & XiValue) == 0) gamma.blue=gamma.red; if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0)) return(MagickTrue); if ((gamma.red == gamma.green) && (gamma.green == gamma.blue)) status=GammaImageChannel(image,(const ChannelType) (RedChannel | GreenChannel | BlueChannel),(double) gamma.red); else { status=GammaImageChannel(image,RedChannel,(double) gamma.red); status|=GammaImageChannel(image,GreenChannel,(double) gamma.green); status|=GammaImageChannel(image,BlueChannel,(double) gamma.blue); } return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType GammaImageChannel(Image *image, const ChannelType channel,const double gamma) { #define GammaCorrectImageTag "GammaCorrect/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; register ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma)))); if (image->storage_class == PseudoClass) { /* Gamma-correct colormap. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=gamma_map[ScaleQuantumToMap( image->colormap[i].red)]; if ((channel & GreenChannel) != 0) image->colormap[i].green=gamma_map[ScaleQuantumToMap( image->colormap[i].green)]; if ((channel & BlueChannel) != 0) image->colormap[i].blue=gamma_map[ScaleQuantumToMap( image->colormap[i].blue)]; if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=gamma_map[ScaleQuantumToMap( image->colormap[i].opacity)]; else image->colormap[i].opacity=(Quantum) QuantumRange-gamma_map[ ScaleQuantumToMap((Quantum) (QuantumRange- image->colormap[i].opacity))]; } } } /* Gamma-correct image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if (channel == DefaultChannels) { SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,gamma_map[ScaleQuantumToMap( GetPixelOpacity(q))]); else SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum) GetPixelAlpha(q))]); } } q++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GammaImageChannel) #endif proceed=SetImageProgress(image,GammaCorrectImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. 
% */ static inline size_t MagickMin(const size_t x,const size_t y) { if (x < y) return(x); return(y); } MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
*/ status=MagickTrue; progress=0; length=MagickMin(hald_image->columns,hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireCacheView(image); hald_view=AcquireCacheView(hald_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel3); offset+=cube_size; (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, 
UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel4); MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,point.z,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HaldClutImageChannel) #endif proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. Colors brighter than the white point are set to the maximum % quantum value. 
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white.  See
% LevelizeImage() and LevelizeImageChannel(), below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
%     MagickBooleanType LevelImage(Image *image,const char *levels)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels (e.g. "10%,90%,1.5"): rho=black point, sigma=white point,
    xi=gamma.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /*
        Level points are quantum values, so a percent argument scales
        relative to the quantum range (10% => 0.10*QuantumRange), not to
        the image pixel count.
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  /*
    When no white point was given, mirror the black point about the
    mid-range (e.g. "10%" => 10%..90%).
  */
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the normal level operation to the image, spreading
% out the values between the black and white points over the entire range of
% values.  Gamma correction is also applied after the values have been mapped.
% % It is typically used to improve image contrast, or to provide a controlled % linear threshold for the image. If the black and white points are set to % the minimum and maximum values found in the image, the image can be % normalized. or by swapping black and white values, negate the image. % % The format of the LevelizeImage method is: % % MagickBooleanType LevelizeImage(Image *image,const double black_point, % const double white_point,const double gamma) % MagickBooleanType LevelizeImageChannel(Image *image, % const ChannelType channel,const double black_point, % const double white_point,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level which is to be mapped to zero (black) % % o white_point: The level which is to be mapped to QuantiumRange (white) % % o gamma: adjust gamma by this factor before mapping values. % use 1.0 for purely linear stretching of image color values % */ static inline MagickRealType LevelPixel(const double black_point, const double white_point,const double gamma,const MagickRealType pixel) { double level_pixel, scale; if (pixel < black_point) return(0.0); if (pixel > white_point) return((MagickRealType) QuantumRange); scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0; level_pixel=(MagickRealType) QuantumRange*pow(scale*((double) pixel- black_point),1.0/gamma); return(level_pixel); } MagickExport MagickBooleanType LevelImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelImageTag "Level/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel( black_point,white_point,gamma,(MagickRealType) image->colormap[i].red)); if ((channel & GreenChannel) != 0) image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel( black_point,white_point,gamma,(MagickRealType) image->colormap[i].green)); if ((channel & BlueChannel) != 0) image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel( black_point,white_point,gamma,(MagickRealType) image->colormap[i].blue)); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=(Quantum) ClampToQuantum(LevelPixel( black_point,white_point,gamma,(MagickRealType) image->colormap[i].opacity)); } /* Level image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelBlue(q)))); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma, (MagickRealType) GetPixelOpacity(q)))); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point, white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x)))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_LevelImageChannel) #endif proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % 
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImageChannel() applies the reversed LevelImage() operation to just
% the specific channels specified.  It compresses the full range of color
% values, so that they lie between the given black and white points.  Gamma is
% applied before the values are mapped.
%
% LevelizeImageChannel() can be called by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used, for example, to de-contrast a greyscale image to the exact
% levels specified.  Or by using specific levels for each channel of an image
% you can convert a gray-scale image to any linear color gradient, according
% to those levels.
%
% The format of the LevelizeImageChannel method is:
%
%     MagickBooleanType LevelizeImageChannel(Image *image,
%       const ChannelType channel,const double black_point,
%       const double white_point,const double gamma)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
*/

MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: levelize all default channels. */
  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Inverse level transfer: gamma correct the normalized value, then compress
  it into the [black_point,white_point] range.  Captures gamma, white_point
  and black_point from the enclosing function scope.
  NOTE(review): gamma == 0.0 would divide by zero in 1.0/gamma -- callers
  are expected to pass a non-zero gamma; confirm at call sites.
*/
#define LevelizeValue(x) (ClampToQuantum(((MagickRealType) \
  pow((double)(QuantumScale*(x)),1.0/gamma))*(white_point-black_point)+ \
  black_point))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* For palette images, remap the colormap entries in place. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=LevelizeValue(image->colormap[i].opacity);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelOpacity(q,LevelizeValue(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColor() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
%
bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelColorsImageChannel method is: % % MagickBooleanType LevelColorsImage(Image *image, % const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % MagickBooleanType LevelColorsImageChannel(Image *image, % const ChannelType channel,const MagickPixelPacket *black_color, % const MagickPixelPacket *white_color,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickExport MagickBooleanType LevelColorsImage(Image *image, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { MagickBooleanType status; status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color, invert); return(status); } MagickExport MagickBooleanType LevelColorsImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *black_color, const MagickPixelPacket *white_color,const MagickBooleanType invert) { MagickStatusType status; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickFalse; if (invert == MagickFalse) { if ((channel & RedChannel) != 0) status|=LevelImageChannel(image,RedChannel, black_color->red,white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status|=LevelImageChannel(image,GreenChannel, black_color->green,white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status|=LevelImageChannel(image,BlueChannel, black_color->blue,white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status|=LevelImageChannel(image,OpacityChannel, black_color->opacity,white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=LevelImageChannel(image,IndexChannel, black_color->index,white_color->index,(double) 1.0); } else { if ((channel & RedChannel) != 0) status|=LevelizeImageChannel(image,RedChannel, black_color->red,white_color->red,(double) 1.0); if ((channel & GreenChannel) != 0) status|=LevelizeImageChannel(image,GreenChannel, black_color->green,white_color->green,(double) 1.0); if ((channel & BlueChannel) != 0) status|=LevelizeImageChannel(image,BlueChannel, black_color->blue,white_color->blue,(double) 1.0); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) status|=LevelizeImageChannel(image,OpacityChannel, black_color->opacity,white_color->opacity,(double) 1.0); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=LevelizeImageChannel(image,IndexChannel, black_color->index,white_color->index,(double) 1.0); } return(status == 0 ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(PixelIntensityToQuantum(p))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. 
*/ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white, 1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. And if the colorspace is % HWB, use blackness, whiteness, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness, Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. 
*/ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness, Quantum *red,Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static void ModulateHWB(const double percent_hue,const double percent_whiteness, const double percent_blackness,Quantum *red,Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. 
*/ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=0.5*(0.01*percent_hue-1.0); while (hue < 0.0) hue+=1.0; while (hue > 1.0) hue-=1.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) { /* Modulate colormap. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) switch (colorspace) { case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); break; } } } /* Modulate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=(Quantum) QuantumRange- image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=(Quantum) QuantumRange- image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=(Quantum) QuantumRange- image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange- GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange- GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == 
MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange- GetPixelIndex(indexes+x)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r 
m a l i z e   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white (as per the black/white points computed below).
%
% The format of the NormalizeImage method is:
%
%     MagickBooleanType NormalizeImage(Image *image)
%     MagickBooleanType NormalizeImageChannel(Image *image,
%       const ChannelType channel)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: normalize the default channels. */
  status=NormalizeImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    black_point,
    white_point;

  /*
    Saturate the darkest 0.15% and the brightest 0.05% of the histogram:
    the points are pixel counts, 0.0015 and 0.9995 of columns*rows.
  */
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm.  Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black).  Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
%  The format of the SigmoidalContrastImage method is:
%
%      MagickBooleanType SigmoidalContrastImage(Image *image,
%        const MagickBooleanType sharpen,const char *levels)
%      MagickBooleanType SigmoidalContrastImageChannel(Image *image,
%        const ChannelType channel,const MagickBooleanType sharpen,
%        const double contrast,const double midpoint)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o alpha: strength of the contrast, the larger the number the more
%      'threshold-like' it becomes.
%
%    o beta: midpoint of the function as a color value 0 to QuantumRange.
%
*/

MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse the "contrast[xmidpoint[%]]" geometry string: rho is the contrast
    strength, sigma the midpoint.
  */
  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;  /* default midpoint: mid-gray */
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;  /* percent -> quantum units */
  status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma);
  return(status);
}

MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *sigmoidal_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize sigmoidal maps: a (MaxMap+1)-entry lookup table
    mapping each quantum level through the (normalized) sigmoid — or its
    inverse when sharpen is MagickFalse.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if (sharpen != MagickFalse)
      {
        /*
          Forward sigmoid, rescaled so that levels 0 and MaxMap map to the
          full quantum range (no saturation of highlights/shadows).
        */
        sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
          (MaxMap*((1.0/(1.0+exp(contrast*(midpoint/(double) QuantumRange-
          (double) i/MaxMap))))-(1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange)))))/((1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange-1.0))))-(1.0/(1.0+exp(contrast*(midpoint/
          (double) QuantumRange)))))+0.5));
        continue;
      }
    /*
      Inverse sigmoid (log form): reduces contrast instead of increasing it.
    */
    sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
      (MaxMap*(QuantumScale*midpoint-log((1.0-(1.0/(1.0+exp(midpoint/
      (double) QuantumRange*contrast))+((double) i/MaxMap)*((1.0/
      (1.0+exp(contrast*(midpoint/(double) QuantumRange-1.0))))-(1.0/
      (1.0+exp(midpoint/(double) QuantumRange*contrast))))))/
      (1.0/(1.0+exp(midpoint/(double) QuantumRange*contrast))+
      ((double) i/MaxMap)*((1.0/(1.0+exp(contrast*(midpoint/
      (double) QuantumRange-1.0))))-(1.0/(1.0+exp(midpoint/
      (double) QuantumRange*contrast))))))/contrast)));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Sigmoidal-contrast enhance colormap.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].red)]);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].green)]);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].blue)]);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
            ScaleQuantumToMap(image->colormap[i].opacity)]);
      }
    }
  /*
    Sigmoidal-contrast enhance image: push every pixel of every row through
    the lookup table via an authentic cache view; rows are processed in
    parallel and a failed row only flags status (no early bail-out mid-loop).
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelRed(q))]));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelGreen(q))]));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelBlue(q))]));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(GetPixelOpacity(q))]));
      /* the index (black) channel only exists for CMYK images */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))]));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is serialized by the named critical section */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
gi_numeric_integrator_path_compressing.h
/* * * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef NUMERIC_INTEGRATOR_PATH_COMPRESSING_H #define NUMERIC_INTEGRATOR_PATH_COMPRESSING_H #include <set> #include <queue> #include <stack> #include "gi_basic_types.h" #include "gi_vectors.h" #include "gi_labeling.h" #include "gi_regular_grid_3d.h" #include "gi_regular_grid_trilinear_function.h" #include "gi_adaptive_euler_advector_3d.h" #include "gi_timing.h" #include "gi_union_find_labeling.h" #include "gi_index_comparer.h" #include "gi_topological_regular_grid_3d.h" #include "gi_array_index_partition.h" #include "omp.h" //#define OUTPUTINTERMEDIATE namespace GInt { template< class Advector, class GridFuncType > class NumericIntegratorPathCompressingToTerminal { protected: DenseLabeling<DestType>* m_desttype; DenseLabeling<int>* m_dest_label; RegularGrid3D* m_grid; GridFuncType* m_func; int m_num_iterations_left; Vec3i m_xyz; Vec3b m_periodic; FLOATTYPE m_error_threshold; FLOATTYPE m_gradient_threshold; FLOATTYPE m_filterValue; public: NumericIntegratorPathCompressingToTerminal(GridFuncType* func, RegularGrid3D* grid, FLOATTYPE error_threshold, FLOATTYPE gradient_threshold, int interation_limit) : m_num_iterations_left(interation_limit), m_xyz(func->GetGrid()->XYZ()), m_periodic(func->GetGrid()->Periodic()), m_func(func), m_grid(grid), m_gradient_threshold(gradient_threshold), m_error_threshold(error_threshold) { m_filterValue = std::numeric_limits<FLOATTYPE>::min(); } ~NumericIntegratorPathCompressingToTerminal() { delete m_desttype; delete m_dest_label; } DenseLabeling<int>* GetOutputLabels() { return m_dest_label; } RegularGrid3D* GetGrid() { return m_grid; } GridFuncType* GetFunction() { return m_func; } template <typename T> static void accumulate_and_clear_sets(std::vector<std::set<T>> & sets, std::set<T> &accSet) { accSet.clear(); for(unsigned 
int i = 0; i < sets.size(); i++) { accSet.insert( sets[i].begin(), sets[i].end() ); sets[i].clear(); } } #if 1 void BeginIntegration(const std::unordered_map<INT_TYPE, std::vector<INDEX_TYPE> >& remap, DenseLabeling<int>* teminals, bool verbose = false) { ThreadedTimer gtimer(1); gtimer.StartGlobal(); if(verbose) printf(" -- Performing numeric integration for volume assignment (%f)...\n", m_filterValue); m_dest_label = teminals; m_desttype = new DenseLabeling<DestType>(m_grid->NumElements()); const INDEX_TYPE t_num_vertices = m_grid->NumElements(); AdvectionChecker* inside_voxel_critical_advection_checker = new TerminateNearPathCompressedRegion(m_desttype, m_grid); ThreadedTimer ltimer0(1); ltimer0.StartGlobal(); // --------------------------------------------------------------------- if (verbose){ printf(" -- finding extrema to terminate integral lines...\n"); fflush(stdout); } #pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { // Harsh added a new label for filtering //if (fabs(m_func->SampleImage(i)) <= m_filterValue) { // m_desttype->SetLabel(i, DestType::BACKGROUND); // m_dest_label->SetLabel(i, -2); // continue; //} switch (m_dest_label->GetLabel(i)) { case -1: m_desttype->SetLabel(i, DestType::UNASSIGNED); break; default: m_desttype->SetLabel(i, DestType::CERTAIN_TERMINAL); } } ltimer0.EndGlobal(); for (auto lp : remap) { for (auto id : lp.second) { m_func->SetGradExplicit(id, Vec3d(0, 0, 0)); } } #ifdef OUTPUTINTERMEDIATE m_dest_label->OutputToFile("certains.raw"); m_desttype->OutputToIntFile("certains_type.raw"); { m_dest_label->OutputToFile("certain_expansion.raw"); m_dest_label->OutputToIntFile("dest_original.raw"); } TopologicalRegularGridRestricted* ttgrid = new TopologicalRegularGridRestricted(m_grid); VertexLabelingToBoundaryLabeling<int>* tedge = new VertexLabelingToBoundaryLabeling<int>(m_dest_label, ttgrid); tedge->ComputeBoundary(); tedge->OutputEdgesToFile("certain_edges.txt"); FILE* fout = fopen("Linesout.txt", "w"); 
#endif if (verbose){ //printf(" expansion done!\n", mExtrema.size()); printf(" -- doing numerical integration first pass with path compression..."); fflush(stdout); } // --------------------------------------------------------------------- ThreadedTimer ltimer1(1); ltimer1.StartGlobal(); int t1, t2; t1 = t2 = 0; #pragma omp parallel { Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_critical_advection_checker); std::vector<INDEX_TYPE> t_path; t_path.reserve(100); int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); std::vector<INDEX_TYPE> partition; GInt::ArrayIndexPartitioner::EvenChunkSplit(t_num_vertices, num_threads, partition); INDEX_TYPE num_to_do = (partition[thread_num + 1] - partition[thread_num]); for (INDEX_TYPE kk = 1; kk <= num_to_do * 4; kk*=2) { INDEX_TYPE startsize = num_to_do / kk; INDEX_TYPE stepsize = (num_to_do*2) / kk; if (stepsize == 0) continue; for (INDEX_TYPE i = partition[thread_num] + startsize; i < partition[thread_num + 1]; i += stepsize) { // early skip if this is already a maximum if (m_desttype->GetLabel(i) != DestType::UNASSIGNED) { continue; } t_path.clear(); t_path.push_back(i); Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the point Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; #ifdef OUTPUTINTERMEDIATE std::vector<Vec3d> line_soup; line_soup.push_back(t_current_point); #endif while (t_continue) { ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); t_path.push_back(t_next_id); if 
(t_return_code == ADVECTION_EVENT::OUT_OF_VOXEL) continue; #ifdef OUTPUTINTERMEDIATE line_soup.push_back(t_current_point); #endif // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { int t_dest_label = m_dest_label->GetLabel(t_next_id); //#pragma omp critical { for (int j = 0; j < t_path.size(); j++) { INDEX_TYPE jj = t_path[j]; if (m_desttype->GetLabel(jj) == DestType::UNASSIGNED) { m_dest_label->SetLabel(jj, t_dest_label); m_desttype->SetLabel(jj, DestType::ASSIGNED); } } } #ifdef OUTPUTINTERMEDIATE #pragma omp critical { int tn = omp_get_thread_num(); fprintf(fout, "%d %d %d %d\n", i, tn, line_soup.size(), t_dest_label); for (int j = 0; j < line_soup.size(); j++) { fprintf(fout, "%f %f %f\n", line_soup[j][0], line_soup[j][1], line_soup[j][2]); } } #endif t_continue = false; } } } } } #ifdef OUTPUTINTERMEDIATE fclose(fout); m_dest_label->OutputToIntFile("first_integration.raw"); m_dest_label->OutputToIntFile("dests_after_first_integration.raw"); m_dest_label->OutputToIntFile("first_integration.raw"); m_desttype->OutputToIntFile("first_integration_type.raw"); #endif ltimer1.EndGlobal(); // --------------------------------------------------------------------- if (verbose){ printf(" done!"); ltimer1.PrintAll(); printf(" -- checking unambiguous voxels..."); fflush(stdout); } ThreadedTimer ltimer2(1); ltimer2.StartGlobal(); // we will process vertices in iterations // this set will contain verts to be processed in the next iteration std::set<INDEX_TYPE> verts_2b_processed_set; // to avoid locking by threads, we will use local copies where threads will add verts // later, we will merge these into the main set defined above std::vector< std::set<INDEX_TYPE> > verts_thrds ( omp_get_max_threads() ); #pragma omp parallel for for (INDEX_TYPE i = 0; i < 
t_num_vertices; i++) { if (m_desttype->GetLabel(i) == DestType::BACKGROUND) continue; //if (m_desttype->GetLabel(i) == DestType::ASSIGNED) { // verts_thrds[omp_get_thread_num()].insert(i); //} Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the poitn Vec3l negs[6]; int nn = m_grid->GatherExistingNeighborsAll6(t_coords, negs); for (int j = 0; j < nn; j++) { INDEX_TYPE v2 = m_grid->Index3d(negs[j]); if (v2 > i) continue; if (m_dest_label->GetLabel(i) != m_dest_label->GetLabel(v2)) { if (m_desttype->GetLabel(i) == DestType::ASSIGNED) { verts_thrds[ omp_get_thread_num() ].insert(i); } if (m_desttype->GetLabel(v2) == DestType::ASSIGNED) { verts_thrds[ omp_get_thread_num() ].insert(v2); } } } } // collect the results of all threads accumulate_and_clear_sets( verts_thrds, verts_2b_processed_set ); ltimer2.EndGlobal(); // --------------------------------------------------------------------- if (verbose){ printf(" done!"); ltimer2.PrintAll(); fflush(stdout); //printf(" -- found %d points needed correction...", verts_2b_processed_set.size()); //fflush(stdout); } ThreadedTimer ltimer3(1); ltimer3.StartGlobal(); AdvectionChecker* inside_voxel_nostop_advection_checker = new TerminateNearOriginalCertain(m_desttype, m_grid); size_t totalfixed = 0; // this loop will iterate until no more verts need to be processed for(unsigned int itern = 0; !verts_2b_processed_set.empty(); itern++) { // transfer from set to vector to start processing // use the set to store the verts needed in the next iteration std::vector<INDEX_TYPE> verts_2b_processed_vector ( verts_2b_processed_set.begin(), verts_2b_processed_set.end() ); if (verbose){ printf(" -- iteration %d will process %d vertices\n", itern, verts_2b_processed_vector.size()); } totalfixed += verts_2b_processed_vector.size(); #pragma omp parallel for for(int i = 0; i < verts_2b_processed_vector.size(); i++) { Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_nostop_advection_checker); 
INDEX_TYPE current_vertex = verts_2b_processed_vector[i]; int init_label = m_dest_label->GetLabel(current_vertex); // INTEGRATE // INTEGRATE Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn //if (t_coords[0] == 0 && t_coords[1] == 0) printf("doing %d\n", t_coords[2]); Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; int new_label; while (t_continue) { ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { new_label = m_dest_label->GetLabel(t_next_id); if (m_desttype->GetLabel(t_next_id) != DestType::CERTAIN_TERMINAL){ //printf("whoatherenelly %d %d\n", m_desttype->GetLabel(t_next_id), t_return_code); } t_continue = false; } } // INTEGRATE // INTEGRATE // m_dest_label->SetLabel(current_vertex, new_label); m_desttype->SetLabel(current_vertex, DestType::CERTAIN_NONTERMINAL); if (new_label != init_label){ // ENQUEUE NEIGHBORS Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn Vec3l negs[6]; //INDEX_TYPE negids[6]; int nn = m_grid->GatherExistingNeighborsAll6(t_coords, negs); // for each neigbhor for (int j = 0; j < nn; j++){ INDEX_TYPE negid = m_grid->Index3d(negs[j]); // only if it has not yet been added to our update set if (m_desttype->GetLabel(negid) == DestType::ASSIGNED 
&& m_dest_label->GetLabel(negid) != new_label) { verts_thrds[ omp_get_thread_num() ].insert(negid); } } } } accumulate_and_clear_sets( verts_thrds, verts_2b_processed_set ); } ltimer3.EndGlobal(); #ifdef OUTPUTINTERMEDIATE m_desttype->OutputToIntFile("classes_type.raw"); #endif if (verbose){ printf(" -- done! fixed a total of %d vertices!", totalfixed); ltimer3.PrintAll(); } gtimer.EndGlobal(); if(verbose){ printf(" -- done numerical integration!"); gtimer.PrintAll(); } } #else void BeginIntegration(bool verbose = false) { verbose = true; ThreadedTimer gtimer(1); gtimer.StartGlobal(); if(verbose) printf(" -- Performing numeric integration for volume assignment (%f)...\n", m_filterValue); //m_func->ComputeGradFromImage(m_rkindex); m_dest_label = new DenseLabeling<int>(m_grid->NumElements()); m_desttype = new DenseLabeling<DestType>(m_grid->NumElements()); const INDEX_TYPE t_num_vertices = m_grid->NumElements(); // THIS WILL NEED TO CHANGE AdvectionChecker* inside_voxel_critical_advection_checker = new TerminateNearPathCompressedRegion(m_desttype, m_grid); //AdvectionChecker* no_check = new NoTermination();//AdvectionChecker* inside_voxel_advection_checker = new TerminateNearAssigned(m_destinations, m_grid); ThreadedTimer ltimer0(1); ltimer0.StartGlobal(); if (verbose){ printf(" -- finding extrema to terminate integral lines..."); fflush(stdout); } // set all potential extrema, so we terminate near them //#pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { //printf(" thread %d of %d max %d does vertx %d\n", // omp_get_thread_num(), omp_get_num_threads(), omp_get_max_threads(), i ); // Harsh added a new label for filtering if (fabs(m_func->SampleImage(i)) <= m_filterValue) { m_desttype->SetLabel(i, DestType::BACKGROUND); m_dest_label->SetLabel(i, -2); continue; } m_desttype->SetLabel(i, DestType::UNASSIGNED); m_dest_label->SetLabel(i, -1); if (IsExtremeVertexIn6Neighborhood(i)) { //#pragma omp critical { mExtrema.push_back(i); 
//m_extrema.insert(i); //m_dest_label->SetLabel(i, 0); } } } #ifdef OUTPUTINTERMEDIATE m_dest_label->OutputToIntFile("crits.raw"); #endif ltimer0.EndGlobal(); ltimer0.PrintAll(); if (verbose){ printf(" done! found %d extrema!\n", mExtrema.size()); printf(" -- expanding extrema certain regions..."); fflush(stdout); } int num_extrema = mExtrema.size(); //#pragma omp parallel shared(mExtrema) { //#pragma omp for schedule(dynamic) nowait for (int m = 0; m < num_extrema; m++) { INDEX_TYPE maximum = mExtrema[m]; Expand_Lower_Neighborhood(maximum, m); m_func->SetGradExplicit(maximum, Vec3d(0, 0, 0)); } } #ifdef OUTPUTINTERMEDIATE m_dest_label->OutputToFile("certains.raw"); m_desttype->OutputToIntFile("certains_type.raw"); { m_dest_label->OutputToFile("certain_expansion.raw"); m_dest_label->OutputToIntFile("dest_original.raw"); } TopologicalRegularGridRestricted* ttgrid = new TopologicalRegularGridRestricted(m_grid); VertexLabelingToBoundaryLabeling<int>* tedge = new VertexLabelingToBoundaryLabeling<int>(m_dest_label, ttgrid); tedge->ComputeBoundary(); tedge->OutputEdgesToFile("certain_edges.txt"); FILE* fout = fopen("Linesout.txt", "w"); #endif if (verbose){ printf(" expansion done!\n", mExtrema.size()); printf(" -- doing numerical integration first pass with path compression..."); fflush(stdout); } ThreadedTimer ltimer1(1); ltimer1.StartGlobal(); int t1, t2; t1 = t2 = 0; //#pragma omp parallel { Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_critical_advection_checker); std::vector<INDEX_TYPE> t_path; t_path.reserve(100); int num_threads = omp_get_num_threads(); int thread_num = omp_get_thread_num(); printf(" thread %d of %d\n", thread_num, num_threads); std::vector<INDEX_TYPE> partition; GInt::ArrayIndexPartitioner::EvenChunkSplit(t_num_vertices, num_threads, partition); INDEX_TYPE num_to_do = (partition[thread_num + 1] - partition[thread_num]); for (INDEX_TYPE kk = 1; kk <= num_to_do * 4; kk*=2) { INDEX_TYPE startsize = 
num_to_do / kk; INDEX_TYPE stepsize = (num_to_do*2) / kk; if (stepsize == 0) continue; //printf("%d %d %d\n", num_to_do, kk, stepsize); for (INDEX_TYPE i = partition[thread_num] + startsize; i < partition[thread_num + 1]; i += stepsize) { //#pragma omp for schedule(guided) nowait // for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { // early skip if this is already a maximum if (m_desttype->GetLabel(i) != DestType::UNASSIGNED) { continue; } t_path.clear(); t_path.push_back(i); Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the poitn //if (t_coords[0] == 0 && t_coords[1] == 0) printf("doing %d\n", t_coords[2]); Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; #ifdef OUTPUTINTERMEDIATE std::vector<Vec3d> line_soup; line_soup.push_back(t_current_point); #endif while (t_continue) { Vec3d t_next_point; ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); t_path.push_back(t_next_id); if (t_return_code == ADVECTION_EVENT::OUT_OF_VOXEL) continue; #ifdef OUTPUTINTERMEDIATE line_soup.push_back(t_current_point); #endif // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { int t_dest_label = m_dest_label->GetLabel(t_next_id); //DestType t_certain_label = m_certains->GetLabel(t_next_id); //#pragma omp critical //if (t_dest_label == -1) { // printf("who there got here %d %d %d %d %d\n", // 
m_desttype->GetLabel(t_next_id), t_next_id, t_return_code, t_num_iterations_left, t_path.size()); // m_grid->XYZ3d(i).PrintInt(); printf("->"); t_coords.PrintInt(); //} //#pragma omp critical { for (int j = 0; j < t_path.size(); j++) { INDEX_TYPE jj = t_path[j]; if (m_desttype->GetLabel(jj) == DestType::UNASSIGNED) { m_dest_label->SetLabel(jj, t_dest_label); m_desttype->SetLabel(jj, DestType::ASSIGNED); } //m_certains->SetLabel(t_path[j], t_certain_label); } #ifdef OUTPUTINTERMEDIATE #pragma omp critical { int tn = omp_get_thread_num(); fprintf(fout, "%d %d %d %d\n", i, tn, line_soup.size(), t_dest_label); for (int j = 0; j < line_soup.size(); j++) { fprintf(fout, "%f %f %f\n", line_soup[j][0], line_soup[j][1], line_soup[j][2]); } } #endif } t_continue = false; } } } } } #ifdef OUTPUTINTERMEDIATE fclose(fout); m_dest_label->OutputToIntFile("first_integration.raw"); m_dest_label->OutputToIntFile("dests_after_first_integration.raw"); m_dest_label->OutputToIntFile("first_integration.raw"); m_desttype->OutputToIntFile("first_integration_type.raw"); #endif ltimer1.EndGlobal(); ltimer1.PrintAll(); if (verbose){ printf(" done!\n"); printf(" -- checking unambiguous voxels..."); fflush(stdout); } ThreadedTimer ltimer2(1); ltimer2.StartGlobal(); std::unordered_set<INDEX_TYPE> added_vertices; std::stack<INDEX_TYPE> vertex_stack; //#pragma omp parallel for for (INDEX_TYPE i = 0; i < t_num_vertices; i++) { Vec3l t_coords = m_grid->XYZ3d(i); // get the coordinates of the poitn Vec3l negs[6]; //INDEX_TYPE negids[6]; int nn = m_grid->GatherExistingNeighborsSameBdry6(t_coords, negs); for (int j = 0; j < nn; j++) { INDEX_TYPE v2 = m_grid->Index3d(negs[j]); if (v2 > i) continue; //if (m_desttype->GetLabel(i) != DestType::ASSIGNED || m_desttype->GetLabel(v2) != DestType::ASSIGNED) continue; if (m_dest_label->GetLabel(i) != m_dest_label->GetLabel(v2)) { if (m_desttype->GetLabel(i) == DestType::ASSIGNED) { //#pragma omp critical { if (added_vertices.count(i) == 0) { 
added_vertices.insert(i); vertex_stack.push(i); } } } if (m_desttype->GetLabel(v2) == DestType::ASSIGNED) { //#pragma omp critical { if (added_vertices.count(v2) == 0) { added_vertices.insert(v2); vertex_stack.push(v2); } } } } } } // TopologicalRegularGrid3D* tgrid = new TopologicalRegularGrid3D(m_grid); // int num_cells = tgrid->numCells(); // // // /// in parallel gather the vertices that need to be updated //#pragma omp parallel // { // // int num_threads = omp_get_num_threads(); // int thread_num = omp_get_thread_num(); // // std::vector<INDEX_TYPE> partition; // ArrayIndexPartitioner::EvenChunkSplit(tgrid->numCells(), num_threads, partition); // TopologicalRegularGrid3D::DCellsIterator edges(tgrid, 1, partition[thread_num], partition[thread_num + 1]); // for (edges.begin(); edges.valid(); edges.advance()) { // TopologicalRegularGrid3D::FacetsIterator fit(tgrid); // fit.begin(edges.value()); // INDEX_TYPE tv1 = fit.value(); // fit.advance(); // INDEX_TYPE tv2 = fit.value(); // // INDEX_TYPE v1 = tgrid->VertexNumberFromCellID(tv1); // INDEX_TYPE v2 = tgrid->VertexNumberFromCellID(tv2); // // if (m_desttype->GetLabel(v1) != DestType::ASSIGNED || m_desttype->GetLabel(v2) != DestType::ASSIGNED) continue; // if (m_dest_label->GetLabel(v1) != m_dest_label->GetLabel(v2)) { // // if (m_desttype->GetLabel(v1) == DestType::ASSIGNED) { //#pragma omp critical // { // if (added_vertices.count(v1) == 0) { // added_vertices.insert(v1); // vertex_stack.push(v1); // } // } // } // if (m_desttype->GetLabel(v2) == DestType::ASSIGNED) { //#pragma omp critical // { // if (added_vertices.count(v2) == 0) { // added_vertices.insert(v2); // vertex_stack.push(v2); // } // } // } // } // } // } // END PARALLEL SECTION // //for (auto id : added_vertices) { // m_desttype->SetLabel(id, DestType::BACKGROUND); //} //m_desttype->OutputToIntFile("reintegrate.raw"); if (verbose){ printf(" done!"); ltimer2.EndGlobal(); ltimer2.PrintAll(); printf(" -- found %d points needed correction %d...", 
added_vertices.size(), vertex_stack.size()); fflush(stdout); } ThreadedTimer ltimer3(1); ltimer3.StartGlobal(); // NOW FIX LABELS AdvectionChecker* inside_voxel_nostop_advection_checker = new TerminateNearOriginalCertain(m_desttype, m_grid); //#pragma omp parallel { int cnt = 0; Advector t_advector(m_grid, m_func, m_gradient_threshold, m_error_threshold, inside_voxel_nostop_advection_checker); bool keep_going = true; while (keep_going) { INDEX_TYPE current_vertex; //#pragma omp critical { if (vertex_stack.size() > 0) { current_vertex = vertex_stack.top(); vertex_stack.pop(); } else { keep_going = false; } } if (cnt ++ < 10 ){ printf(" [%d] = %d\n", cnt, current_vertex); } if (keep_going) { // INITIAL VALUE int init_label = m_dest_label->GetLabel(current_vertex); // INTEGRATE // INTEGRATE Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn //if (t_coords[0] == 0 && t_coords[1] == 0) printf("doing %d\n", t_coords[2]); Vec3d t_current_point = t_coords; int t_num_iterations_left = m_num_iterations_left; bool t_continue = true; int new_label; while (t_continue) { Vec3d t_next_point; ADVECTION_EVENT t_return_code; if (m_grid->DistToBoundary(t_coords) <= 1) { t_return_code = t_advector.AdvectThroughVoxelNearBoundary(t_current_point, t_num_iterations_left); t_coords = m_grid->Inbounds(t_current_point + 0.5); // get nearest integer voxel t1++; } else { t_return_code = t_advector.AdvectThroughVoxelNoCheck(t_current_point, t_num_iterations_left); t_coords = (t_current_point + 0.5); t2++; } INDEX_TYPE t_next_id = m_grid->Index3d(t_coords); // if we terminated or hit a critical point, then we are done if (t_return_code == ADVECTION_EVENT::LOW_GRADIENT || t_return_code == ADVECTION_EVENT::HIT_EXTREMUM || t_return_code == ADVECTION_EVENT::HIT_PREASSIGNED || t_return_code == ADVECTION_EVENT::OVER_MAX_ITERATIONS) { new_label = m_dest_label->GetLabel(t_next_id); if (m_desttype->GetLabel(t_next_id) != DestType::CERTAIN_TERMINAL) printf("whoatherenelly 
%d %d\n", m_desttype->GetLabel(t_next_id), t_return_code); t_continue = false; } } // INTEGRATE // INTEGRATE m_dest_label->SetLabel(current_vertex, new_label); m_desttype->SetLabel(current_vertex, DestType::CERTAIN_NONTERMINAL); if (new_label != init_label) { // ENQUEUE NEIGHBORS Vec3l t_coords = m_grid->XYZ3d(current_vertex); // get the coordinates of the poitn Vec3l negs[6]; INDEX_TYPE negids[6]; int nn = m_grid->GatherExistingNeighborsSameBdry6(t_coords, negs); for (int j = 0; j < nn; j++) negids[j] = m_grid->Index3d(negs[j]); //#pragma omp critical { // for each neigbhor for (int j = 0; j < nn; j++) { INDEX_TYPE negid = negids[j]; // only if it has not yet been added to our update set if (added_vertices.count(negid) == 0) { if (m_desttype->GetLabel(negid) == DestType::ASSIGNED && m_dest_label->GetLabel(negid) != new_label) { added_vertices.insert(negid); vertex_stack.push(negid); } } } } } } } // END WHILE } // END PARALLEL ltimer3.EndGlobal(); ltimer3.PrintAll(); #ifdef OUTPUTINTERMEDIATE m_desttype->OutputToIntFile("classes_type.raw"); #endif if (verbose){ printf(" done! fixed a total of %d vertices\n", added_vertices.size()); } gtimer.EndGlobal(); gtimer.PrintAll(); } #endif }; } #endif
rose_jacobi_float_avx2.c
#include "rex_kmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy — was missing; memcpy was implicitly declared */
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <math.h>
#include <immintrin.h>

#define REAL float

/* Wall-clock timestamp in milliseconds (ftime-based, ~1 ms resolution). */
static double read_timer_ms()
{
    struct timeb tm;
    ftime(&tm);
    return ((double)tm.time) * 1000.0 + ((double)tm.millitm);
}

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
 * Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This c version program is translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Input : n     - grid dimension in x direction
 *         m     - grid dimension in y direction
 *         alpha - Helmholtz constant (always greater than 0.0)
 *         tol   - error tolerance for iterative solver
 *         relax - Successive over-relaxation parameter
 *         mits  - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solution)
 *       : f(n,m) - Right hand side function
 *************************************************************/
#define DEFAULT_DIMSIZE 256

/* Print an n x m row-major matrix A, labeling the output with title/name. */
void print_array(char *title, char *name, float *A, int n, int m)
{
    printf("%s:\n", title);
    int i;
    int j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data: u = 0 and f = RHS of the Helmholtz
 * equation, assuming exact solution u(x,y) = (1-x^2)*(1-y^2).
 * Grid spacings are returned through dx and dy.
 ******************************************************/
void initialize(int n, int m, float alpha, float *dx, float *dy,
                float *u_p, float *f_p)
{
    int i;
    int j;
    int xx;
    int yy;
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);

    *dx = (2.0 / (n - 1));
    *dy = (2.0 / (m - 1));

    /* Initialize initial condition and RHS.
     * NOTE(review): xx/yy are truncated to int, as in the original
     * translated code; kept unchanged so results stay comparable. */
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = ((int)(-1.0 + (*dx * (i - 1))));
            yy = ((int)(-1.0 + (*dy * (j - 1))));
            u[i][j] = 0.0;
            f[i][j] = (-1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy))
                       - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
        }
}

/* subroutine error_check (n,m,alpha,dx,dy,u,f)
 ************************************************************
 * Checks error between numerical and exact solution
 * (normalized L2 norm of u - (1-x^2)(1-y^2)).
 ************************************************************/
void error_check(int n, int m, float alpha, float dx, float dy,
                 float *u_p, float *f_p)
{
    int i;
    int j;
    float xx;
    float yy;
    float temp;
    float error;
    error = 0.0;
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = (-1.0 + (dx * (i - 1)));
            yy = (-1.0 + (dy * (j - 1)));
            temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
            error = error + temp * temp;
        }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}

void jacobi_seq(int n, int m, float dx, float dy, float alpha, float relax,
                float *u_p, float *f_p, float tol, int mits);
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float relax,
                float *u_p, float *f_p, float tol, int mits);

/* Driver: parse positional args, run warm-up, then time num_runs
 * repetitions of the sequential and OpenMP solvers and report averages. */
int main(int argc, char *argv[])
{
    int n = 256;
    int m = 256;
    float alpha = 0.0543;
    float tol = 0.0000000001;
    float relax = 1.0;
    int mits = 5000;

    /* Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]
     * Each level of argc fills in one more parameter; the rest keep
     * their defaults. Extra arguments are ignored. */
    if (argc == 2) {
        sscanf(argv[1], "%d", &n);
        m = n;
    } else if (argc == 3) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
    } else if (argc == 4) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
    } else if (argc == 5) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
    } else if (argc == 6) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
    } else if (argc == 7) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
        sscanf(argv[6], "%d", &mits);
    } else {
        /* the rest of arg ignored */
    }

    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");

    /* u: sequential solution, uomp: OpenMP solution, f: RHS */
    float *u = (float *)(malloc(sizeof(float) * n * m));
    float *uomp = (float *)(malloc(sizeof(float) * n * m));
    float *f = (float *)(malloc(sizeof(float) * n * m));
    if (u == NULL || uomp == NULL || f == NULL) {
        fprintf(stderr, "jacobi: out of memory\n");
        free(u);
        free(uomp);
        free(f);
        return 1;
    }
    float dx; /* grid spacing in x direction */
    float dy; /* grid spacing in y direction */

    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(float) * n * m);

    /* warming up (not timed) */
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);

    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(float) * n * m);

    int num_runs = 20; /* fixed repetition count for averaging */
    double elapsed = 0;
    for (int i = 0; i < num_runs; i++) { /* was hard-coded 20 */
        double elapsed1 = read_timer_ms();
        jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
        elapsed += read_timer_ms() - elapsed1;
    }
    printf("seq elapsed time(ms): %4f\n", elapsed / num_runs);

    puts("================");

    double elapsed2 = 0;
    for (int i = 0; i < num_runs; i++) { /* was hard-coded 20 */
        double elapsed3 = read_timer_ms();
        jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
        elapsed2 += read_timer_ms() - elapsed3;
    }
    printf("OpenMP elapsed time(ms): %4f\n", elapsed2 / num_runs);

    error_check(n, m, alpha, dx, dy, u, f);
    free(u);
    free(f);
    free(uomp);
    return 0;
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
 ******************************************************************
 * Subroutine HelmholtzJ
 * Solves poisson equation on rectangular grid assuming :
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlet boundary conditions
 *
 * Jacobi method is used in this routine
 *
 * Input : n,m   Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn. coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         mits   Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi_seq(int n, int m, float dx, float dy, float alpha, float omega,
                float *u_p, float *f_p, float tol, int mits)
{
    int i;
    int j;
    int k;
    float error;
    float ax;
    float ay;
    float b;
    float resid;
    /* Scratch copy of the solution. Heap-allocated (the original used a
     * stack VLA float[n][m], which overflows the stack for large grids). */
    float *tmp = (float *)(malloc(sizeof(float) * n * m));
    if (tmp == NULL) {
        fprintf(stderr, "jacobi_seq: out of memory\n");
        return;
    }
    float (*uold)[m] = ((float (*)[m])tmp);
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);

    /* Initialize coefficients */
    ax = (1.0 / (dx * dx)); /* X-direction coef */
    ay = (1.0 / (dy * dy)); /* Y-direction coef */
    b = (-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); /* Central coeff */

    error = (10.0 * tol);
    k = 1;
    while (k <= mits && error > tol) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        /* Jacobi sweep over interior points (Dirichlet boundary fixed) */
        for (i = 1; i < n - 1; i++)
            for (j = 1; j < m - 1; j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                         + ay * (uold[i][j - 1] + uold[i][j + 1])
                         + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        error = (sqrt(error) / (n * m));
        k = k + 1;
    } /* End iteration loop */
    /* NOTE(review): k is one past the last completed iteration, as in
     * the original code. */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}

/* OpenMP/AVX2 variant of jacobi_seq: same algorithm and same output,
 * with the copy step vectorized via AVX2 intrinsics and the sweep
 * annotated with `omp simd`. Interface identical to jacobi_seq. */
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float omega,
                float *u_p, float *f_p, float tol, int mits)
{
    int i;
    int j;
    int k;
    float error;
    float ax;
    float ay;
    float b;
    float resid;
    float *tmp = (float *)(malloc(sizeof(float) * n * m));
    if (tmp == NULL) {
        fprintf(stderr, "jacobi_omp: out of memory\n");
        return;
    }
    float (*uold)[m] = ((float (*)[m])tmp);
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);

    /* Initialize coefficients */
    ax = (1.0 / (dx * dx)); /* X-direction coef */
    ay = (1.0 / (dy * dy)); /* Y-direction coef */
    b = (-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha); /* Central coeff */

    /* Only full 8-lane groups may be copied with AVX; the original loop
     * (j <= m-1; j += 8) read and wrote past the row end when m % 8 != 0. */
    int mvec = m - (m % 8);

    error = (10.0 * tol);
    k = 1;
    while (k <= mits && error > tol) {
        error = 0.0;
        /* Copy new solution into old: AVX2 body + scalar remainder */
        for (i = 0; i < n; i++) {
            for (j = 0; j < mvec; j += 8) {
                __m256 v = _mm256_loadu_ps(&u[i][j]);
                _mm256_storeu_ps(&uold[i][j], v);
            }
            for (; j < m; j++) /* tail when m is not a multiple of 8 */
                uold[i][j] = u[i][j];
        }
        for (i = 1; i < n - 1; i++) {
#pragma omp simd simdlen(8) reduction(+ : error)
            for (j = 1; j < m - 1; j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                         + ay * (uold[i][j - 1] + uold[i][j + 1])
                         + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        }
        error = (sqrt(error) / (n * m));
        k = k + 1;
    } /* End iteration loop */
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
b4ld.c
/* ****************************************************************************** * BSIM4 4.8.1 released by Chetan Kumar Dabhi 2/15/2017 * * BSIM4 Model Equations * ****************************************************************************** ****************************************************************************** * Copyright 2017 Regents of the University of California. * * All rights reserved. * * * * Project Director: Prof. Chenming Hu. * * Authors: Gary W. Ng, Weidong Liu, Xuemei Xi, Mohan Dunga, Wenwei Yang * * Ali Niknejad, Shivendra Singh Parihar, Chetan Kumar Dabhi * * Yogesh Singh Chauhan, Sayeef Salahuddin, Chenming Hu * ****************************************************************************** ****************************************************************************** * CMC In-Code Statement * * * * The Developer agrees that the following statement will appear in the * * model code that has been adopted as a CMC Standard. * * * * Software is distributed as is, completely without warranty or service * * support. The University of California and its employees are not liable * * for the condition or performance of the software. * * * * The University of California owns the copyright and grants users a * * perpetual, irrevocable, worldwide, non-exclusive, royalty-free license * * with respect to the software as set forth below. * * * * The University of California hereby disclaims all implied warranties. * * * * The University of California grants the users the right to modify, * * copy, and redistribute the software and documentation, both within * * the user's organization and externally, subject to the following * * restrictions: * * * * 1. The users agree not to charge for the University of California code * * itself but may charge for additions, extensions, or support. * * * * 2. In any product based on the software, the users agree to * * acknowledge the University of California that developed the * * software. 
This acknowledgment shall appear in the product * * documentation. * * * * 3. Redistributions to others of source code and documentation must * * retain the copyright notice, disclaimer, and list of conditions. * * * * 4. Redistributions to others in binary form must reproduce the * * copyright notice, disclaimer, and list of conditions in the * * documentation and/or other materials provided with the * * distribution. * * * * Agreed to on ______Feb. 15, 2017______________ * * * * By: ____University of California, Berkeley___ * * ____Chenming Hu__________________________ * * ____Professor in Graduate School ________ * * * ****************************************************************************** */ /**** OpenMP support ngspice 06/28/2010 ****/ #include "ngspice/ngspice.h" #include "ngspice/cktdefs.h" #include "bsim4def.h" #include "ngspice/trandefs.h" #include "ngspice/const.h" #include "ngspice/sperror.h" #include "ngspice/devdefs.h" #include "ngspice/suffix.h" #define MAX_EXPL 2.688117142e+43 #define MIN_EXPL 3.720075976e-44 #define EXPL_THRESHOLD 100.0 #define MAX_EXP 5.834617425e14 #define MIN_EXP 1.713908431e-15 #define EXP_THRESHOLD 34.0 #define EPS0 8.85418e-12 #define EPSSI 1.03594e-10 #define Charge_q 1.60219e-19 #define DELTA_1 0.02 #define DELTA_2 0.02 #define DELTA_3 0.02 #define DELTA_4 0.02 #define MM 3 /* smooth coeff */ #define DEXP(A,B,C) { \ if (A > EXP_THRESHOLD) { \ B = MAX_EXP*(1.0+(A)-EXP_THRESHOLD); \ C = MAX_EXP; \ } else if (A < -EXP_THRESHOLD) { \ B = MIN_EXP; \ C = 0; \ } else { \ B = exp(A); \ C = B; \ } \ } #ifdef USE_OMP int BSIM4LoadOMP(BSIM4instance *here, CKTcircuit *ckt); void BSIM4LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt); #endif int BSIM4polyDepletion(double phi, double ngate,double epsgate, double coxe, double Vgs, double *Vgs_eff, double *dVgs_eff_dVg); int BSIM4load( GENmodel *inModel, CKTcircuit *ckt) { #ifdef USE_OMP int idx; BSIM4model *model = (BSIM4model*)inModel; int error = 0; BSIM4instance **InstArray; 
InstArray = model->BSIM4InstanceArray; #pragma omp parallel for for (idx = 0; idx < model->BSIM4InstCount; idx++) { BSIM4instance *here = InstArray[idx]; int local_error = BSIM4LoadOMP(here, ckt); if (local_error) error = local_error; } BSIM4LoadRhsMat(inModel, ckt); return error; } int BSIM4LoadOMP(BSIM4instance *here, CKTcircuit *ckt) { BSIM4model *model = BSIM4modPtr(here); #else BSIM4model *model = (BSIM4model*)inModel; BSIM4instance *here; #endif double ceqgstot, dgstot_dvd, dgstot_dvg, dgstot_dvs, dgstot_dvb; double ceqgdtot, dgdtot_dvd, dgdtot_dvg, dgdtot_dvs, dgdtot_dvb; double gstot, gstotd, gstotg, gstots, gstotb, gspr, Rs, Rd; double gdtot, gdtotd, gdtotg, gdtots, gdtotb, gdpr; double vgs_eff, vgd_eff, dvgs_eff_dvg, dvgd_eff_dvg; double dRs_dvg, dRd_dvg, dRs_dvb, dRd_dvb; double dT0_dvg, dT1_dvb, dT3_dvg, dT3_dvb; double vses, vdes, vdedo, delvses, delvded, delvdes; double Isestot, cseshat, Idedtot, cdedhat; #ifndef NEWCONV double tol0, tol1, tol2, tol3, tol4, tol5, tol6; #endif double geltd, gcrg, gcrgg, gcrgd, gcrgs, gcrgb, ceqgcrg; double vges, vgms, vgedo, vgmdo, vged, vgmd, delvged, delvgmd; double delvges, delvgms, vgmb; double gcgmgmb=0.0, gcgmdb=0.0, gcgmsb=0.0, gcdgmb, gcsgmb; double gcgmbb=0.0, gcbgmb, qgmb, qgmid=0.0, ceqqgmid; double vbd, vbs, vds, vgb, vgd, vgs, vgdo; #ifndef PREDICTOR double xfact; #endif double vdbs, vdbd, vsbs, vsbdo, vsbd; double delvdbs, delvdbd, delvsbs; double delvbd_jct, delvbs_jct, vbs_jct, vbd_jct; double SourceSatCurrent, DrainSatCurrent; double ag0, qgb, von, cbhat, VgstNVt, ExpVgst; double ceqqb, ceqqd, ceqqg, ceqqjd=0.0, ceqqjs=0.0, ceq, geq; double cdrain, cdhat, ceqdrn, ceqbd, ceqbs, ceqjd, ceqjs, gjbd, gjbs; double czbd, czbdsw, czbdswg, czbs, czbssw, czbsswg, evbd, evbs, arg, sarg; double delvbd, delvbs, delvds, delvgd, delvgs; double Vfbeff, dVfbeff_dVg, dVfbeff_dVb, V3, V4; double gcbdb, gcbgb, gcbsb, gcddb, gcdgb, gcdsb, gcgdb, gcggb, gcgsb, gcsdb; double gcgbb, gcdbb, gcsbb, gcbbb; double gcdbdb, 
gcsbsb; double gcsgb, gcssb, MJD, MJSWD, MJSWGD, MJS, MJSWS, MJSWGS; double qgate=0.0, qbulk=0.0, qdrn=0.0, qsrc, cqgate, cqbody, cqdrn; double Vdb, Vds, Vgs, Vbs, Gmbs, FwdSum, RevSum; double Igidl, Ggidld, Ggidlg, Ggidlb; double Voxacc=0.0, dVoxacc_dVg=0.0, dVoxacc_dVb=0.0; double Voxdepinv=0.0, dVoxdepinv_dVg=0.0, dVoxdepinv_dVd=0.0, dVoxdepinv_dVb=0.0; double VxNVt=0.0, ExpVxNVt, Vaux=0.0, dVaux_dVg=0.0, dVaux_dVd=0.0, dVaux_dVb=0.0; double Igc, dIgc_dVg, dIgc_dVd, dIgc_dVb; double Igcs, dIgcs_dVg, dIgcs_dVd, dIgcs_dVb; double Igcd, dIgcd_dVg, dIgcd_dVd, dIgcd_dVb; double Igs, dIgs_dVg, dIgs_dVs, Igd, dIgd_dVg, dIgd_dVd; double Igbacc, dIgbacc_dVg, dIgbacc_dVb; double Igbinv, dIgbinv_dVg, dIgbinv_dVd, dIgbinv_dVb; double Pigcd, dPigcd_dVg, dPigcd_dVd, dPigcd_dVb; double Istoteq, gIstotg, gIstotd, gIstots, gIstotb; double Idtoteq, gIdtotg, gIdtotd, gIdtots, gIdtotb; double Ibtoteq, gIbtotg, gIbtotd, gIbtots, gIbtotb; double Igtoteq, gIgtotg, gIgtotd, gIgtots, gIgtotb; double Igstot, cgshat, Igdtot, cgdhat, Igbtot, cgbhat; double Vgs_eff, Vfb=0.0, Vth_NarrowW; /* double Vgd_eff, dVgd_eff_dVg; v4.7.0 */ double Phis, dPhis_dVb, sqrtPhis, dsqrtPhis_dVb, Vth, dVth_dVb, dVth_dVd; double Vgst, dVgst_dVg, dVgst_dVb, dVgs_eff_dVg, Nvtms, Nvtmd; double Vtm, Vtm0; double n, dn_dVb, dn_dVd, voffcv, noff, dnoff_dVd, dnoff_dVb; double V0, CoxWLcen, QovCox, LINK; double DeltaPhi, dDeltaPhi_dVg, VgDP, dVgDP_dVg; double Cox, Tox, Tcen, dTcen_dVg, dTcen_dVd, dTcen_dVb; double Ccen, Coxeff, dCoxeff_dVd, dCoxeff_dVg, dCoxeff_dVb; double Denomi, dDenomi_dVg, dDenomi_dVd, dDenomi_dVb; double ueff, dueff_dVg, dueff_dVd, dueff_dVb; double Esat, Vdsat; double EsatL, dEsatL_dVg, dEsatL_dVd, dEsatL_dVb; double dVdsat_dVg, dVdsat_dVb, dVdsat_dVd, Vasat, dAlphaz_dVg, dAlphaz_dVb; double dVasat_dVg, dVasat_dVb, dVasat_dVd, Va, dVa_dVd, dVa_dVg, dVa_dVb; double Vbseff, dVbseff_dVb, VbseffCV, dVbseffCV_dVb; double VgsteffVth, dT11_dVg; double Arg1, One_Third_CoxWL, Two_Third_CoxWL, Alphaz, 
CoxWL; double T0=0.0, dT0_dVg, dT0_dVd, dT0_dVb; double T1, dT1_dVg, dT1_dVd, dT1_dVb; double T2, dT2_dVg, dT2_dVd, dT2_dVb; double T3, dT3_dVg, dT3_dVd, dT3_dVb; double T4, dT4_dVd, dT4_dVb; double T5, dT5_dVg, dT5_dVd, dT5_dVb; double T6, dT6_dVg, dT6_dVd, dT6_dVb; double T7, dT7_dVg, dT7_dVd, dT7_dVb; double T8, dT8_dVg, dT8_dVd, dT8_dVb; double T9, dT9_dVg, dT9_dVd, dT9_dVb; double T10, dT10_dVg, dT10_dVb, dT10_dVd; double T11, T12, T13, T14; double tmp, Abulk, dAbulk_dVb, Abulk0, dAbulk0_dVb; double Cclm, dCclm_dVg, dCclm_dVd, dCclm_dVb; double FP, dFP_dVg, PvagTerm, dPvagTerm_dVg, dPvagTerm_dVd, dPvagTerm_dVb; double VADITS, dVADITS_dVg, dVADITS_dVd; double Lpe_Vb, dDITS_Sft_dVb, dDITS_Sft_dVd; double DITS_Sft2, dDITS_Sft2_dVd; /* v4.7 New DITS */ double VACLM, dVACLM_dVg, dVACLM_dVd, dVACLM_dVb; double VADIBL, dVADIBL_dVg, dVADIBL_dVd, dVADIBL_dVb; double Xdep, dXdep_dVb, lt1, dlt1_dVb, ltw, dltw_dVb, Delt_vth, dDelt_vth_dVb; double Theta0, dTheta0_dVb; double TempRatio, tmp1, tmp2, tmp3, tmp4; double DIBL_Sft, dDIBL_Sft_dVd, Lambda, dLambda_dVg; double Idtot, Ibtot, a1, ScalingFactor; double Vgsteff, dVgsteff_dVg, dVgsteff_dVd, dVgsteff_dVb; double Vdseff, dVdseff_dVg, dVdseff_dVd, dVdseff_dVb; double VdseffCV, dVdseffCV_dVg, dVdseffCV_dVd, dVdseffCV_dVb; double diffVds, dAbulk_dVg; double beta, dbeta_dVg, dbeta_dVd, dbeta_dVb; double gche, dgche_dVg, dgche_dVd, dgche_dVb; double fgche1, dfgche1_dVg, dfgche1_dVd, dfgche1_dVb; double fgche2, dfgche2_dVg, dfgche2_dVd, dfgche2_dVb; double Idl, dIdl_dVg, dIdl_dVd, dIdl_dVb; double Idsa, dIdsa_dVg, dIdsa_dVd, dIdsa_dVb; double Ids, Gm, Gds, Gmb, devbs_dvb, devbd_dvb; double Isub, Gbd, Gbg, Gbb; double VASCBE, dVASCBE_dVg, dVASCBE_dVd, dVASCBE_dVb; double CoxeffWovL; double Rds, dRds_dVg, dRds_dVb, WVCox, WVCoxRds; double Vgst2Vtm, VdsatCV; double Leff, Weff, dWeff_dVg, dWeff_dVb; double AbulkCV, dAbulkCV_dVb; double qcheq, qdef, gqdef=0.0, cqdef=0.0, cqcheq=0.0; double gcqdb=0.0, gcqsb=0.0, gcqgb=0.0, gcqbb=0.0; 
double dxpart, sxpart, ggtg, ggtd, ggts, ggtb; double ddxpart_dVd, ddxpart_dVg, ddxpart_dVb, ddxpart_dVs; double dsxpart_dVd, dsxpart_dVg, dsxpart_dVb, dsxpart_dVs; double gbspsp, gbbdp, gbbsp, gbspg, gbspb, gbspdp; double gbdpdp, gbdpg, gbdpb, gbdpsp; double qgdo, qgso, cgdo, cgso; double Cgg, Cgd, Cgb, Cdg, Cdd, Cds; double Csg, Csd, Css, Csb, Cbg, Cbd, Cbb; double Cgg1, Cgd1, Cgb1, Cbg1, Cbb1, Cbd1, Qac0, Qsub0; double dQac0_dVg, dQac0_dVb, dQsub0_dVg, dQsub0_dVd, dQsub0_dVb; double ggidld, ggidlg, ggidlb, ggislg, ggislb, ggisls; double Igisl, Ggislg, Ggislb, Ggisls; double Nvtmrss, Nvtmrssws, Nvtmrsswgs; double Nvtmrsd, Nvtmrsswd, Nvtmrsswgd; double vs, Fsevl, dvs_dVg, dvs_dVd, dvs_dVb, dFsevl_dVg, dFsevl_dVd, dFsevl_dVb; double vgdx, vgsx, epssub, toxe, epsrox; struct bsim4SizeDependParam *pParam; int ByPass, ChargeComputationNeeded, error, Check, Check1, Check2; double m; ScalingFactor = 1.0e-9; ChargeComputationNeeded = ((ckt->CKTmode & (MODEDCTRANCURVE | MODEAC | MODETRAN | MODEINITSMSIG)) || ((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC))) ? 
1 : 0; #ifndef USE_OMP for (; model != NULL; model = BSIM4nextModel(model)) { for (here = BSIM4instances(model); here != NULL; here = BSIM4nextInstance(here)) { #endif Check = Check1 = Check2 = 1; ByPass = 0; pParam = here->pParam; if ((ckt->CKTmode & MODEINITSMSIG)) { vds = *(ckt->CKTstate0 + here->BSIM4vds); vgs = *(ckt->CKTstate0 + here->BSIM4vgs); vbs = *(ckt->CKTstate0 + here->BSIM4vbs); vges = *(ckt->CKTstate0 + here->BSIM4vges); vgms = *(ckt->CKTstate0 + here->BSIM4vgms); vdbs = *(ckt->CKTstate0 + here->BSIM4vdbs); vsbs = *(ckt->CKTstate0 + here->BSIM4vsbs); vses = *(ckt->CKTstate0 + here->BSIM4vses); vdes = *(ckt->CKTstate0 + here->BSIM4vdes); qdef = *(ckt->CKTstate0 + here->BSIM4qdef); } else if ((ckt->CKTmode & MODEINITTRAN)) { vds = *(ckt->CKTstate1 + here->BSIM4vds); vgs = *(ckt->CKTstate1 + here->BSIM4vgs); vbs = *(ckt->CKTstate1 + here->BSIM4vbs); vges = *(ckt->CKTstate1 + here->BSIM4vges); vgms = *(ckt->CKTstate1 + here->BSIM4vgms); vdbs = *(ckt->CKTstate1 + here->BSIM4vdbs); vsbs = *(ckt->CKTstate1 + here->BSIM4vsbs); vses = *(ckt->CKTstate1 + here->BSIM4vses); vdes = *(ckt->CKTstate1 + here->BSIM4vdes); qdef = *(ckt->CKTstate1 + here->BSIM4qdef); } else if ((ckt->CKTmode & MODEINITJCT) && !here->BSIM4off) { vds = model->BSIM4type * here->BSIM4icVDS; vgs = vges = vgms = model->BSIM4type * here->BSIM4icVGS; vbs = vdbs = vsbs = model->BSIM4type * here->BSIM4icVBS; if (vds > 0.0) { vdes = vds + 0.01; vses = -0.01; } else if (vds < 0.0) { vdes = vds - 0.01; vses = 0.01; } else vdes = vses = 0.0; qdef = 0.0; if ((vds == 0.0) && (vgs == 0.0) && (vbs == 0.0) && ((ckt->CKTmode & (MODETRAN | MODEAC|MODEDCOP | MODEDCTRANCURVE)) || (!(ckt->CKTmode & MODEUIC)))) { vds = 0.1; vdes = 0.11; vses = -0.01; vgs = vges = vgms = model->BSIM4type * here->BSIM4vth0 + 0.1; vbs = vdbs = vsbs = 0.0; } } else if ((ckt->CKTmode & (MODEINITJCT | MODEINITFIX)) && (here->BSIM4off)) { vds = vgs = vbs = vges = vgms = 0.0; vdbs = vsbs = vdes = vses = qdef = 0.0; } else { #ifndef 
PREDICTOR if ((ckt->CKTmode & MODEINITPRED)) { xfact = ckt->CKTdelta / ckt->CKTdeltaOld[1]; *(ckt->CKTstate0 + here->BSIM4vds) = *(ckt->CKTstate1 + here->BSIM4vds); vds = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vds)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vds))); *(ckt->CKTstate0 + here->BSIM4vgs) = *(ckt->CKTstate1 + here->BSIM4vgs); vgs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vgs)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vgs))); *(ckt->CKTstate0 + here->BSIM4vges) = *(ckt->CKTstate1 + here->BSIM4vges); vges = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vges)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vges))); *(ckt->CKTstate0 + here->BSIM4vgms) = *(ckt->CKTstate1 + here->BSIM4vgms); vgms = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vgms)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vgms))); *(ckt->CKTstate0 + here->BSIM4vbs) = *(ckt->CKTstate1 + here->BSIM4vbs); vbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vbs)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vbs))); *(ckt->CKTstate0 + here->BSIM4vbd) = *(ckt->CKTstate0 + here->BSIM4vbs) - *(ckt->CKTstate0 + here->BSIM4vds); *(ckt->CKTstate0 + here->BSIM4vdbs) = *(ckt->CKTstate1 + here->BSIM4vdbs); vdbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vdbs)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vdbs))); *(ckt->CKTstate0 + here->BSIM4vdbd) = *(ckt->CKTstate0 + here->BSIM4vdbs) - *(ckt->CKTstate0 + here->BSIM4vds); *(ckt->CKTstate0 + here->BSIM4vsbs) = *(ckt->CKTstate1 + here->BSIM4vsbs); vsbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vsbs)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vsbs))); *(ckt->CKTstate0 + here->BSIM4vses) = *(ckt->CKTstate1 + here->BSIM4vses); vses = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vses)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vses))); *(ckt->CKTstate0 + here->BSIM4vdes) = *(ckt->CKTstate1 + here->BSIM4vdes); vdes = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4vdes)) - (xfact * (*(ckt->CKTstate2 + here->BSIM4vdes))); *(ckt->CKTstate0 + 
here->BSIM4qdef) = *(ckt->CKTstate1 + here->BSIM4qdef); qdef = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4qdef)) -(xfact * (*(ckt->CKTstate2 + here->BSIM4qdef))); } else { #endif /* PREDICTOR */ vds = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4dNodePrime) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vgs = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4gNodePrime) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vbs = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4bNodePrime) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vges = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4gNodeExt) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vgms = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4gNodeMid) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vdbs = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4dbNode) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vsbs = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4sbNode) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vses = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4sNode) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); vdes = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4dNode) - *(ckt->CKTrhsOld + here->BSIM4sNodePrime)); qdef = model->BSIM4type * (*(ckt->CKTrhsOld + here->BSIM4qNode)); #ifndef PREDICTOR } #endif /* PREDICTOR */ vgdo = *(ckt->CKTstate0 + here->BSIM4vgs) - *(ckt->CKTstate0 + here->BSIM4vds); vgedo = *(ckt->CKTstate0 + here->BSIM4vges) - *(ckt->CKTstate0 + here->BSIM4vds); vgmdo = *(ckt->CKTstate0 + here->BSIM4vgms) - *(ckt->CKTstate0 + here->BSIM4vds); vbd = vbs - vds; vdbd = vdbs - vds; vgd = vgs - vds; vged = vges - vds; vgmd = vgms - vds; delvbd = vbd - *(ckt->CKTstate0 + here->BSIM4vbd); delvdbd = vdbd - *(ckt->CKTstate0 + here->BSIM4vdbd); delvgd = vgd - vgdo; delvged = vged - vgedo; delvgmd = vgmd - vgmdo; delvds = vds - *(ckt->CKTstate0 + here->BSIM4vds); delvgs = vgs - *(ckt->CKTstate0 + here->BSIM4vgs); delvges = vges - *(ckt->CKTstate0 + here->BSIM4vges); delvgms = vgms - 
*(ckt->CKTstate0 + here->BSIM4vgms); delvbs = vbs - *(ckt->CKTstate0 + here->BSIM4vbs); delvdbs = vdbs - *(ckt->CKTstate0 + here->BSIM4vdbs); delvsbs = vsbs - *(ckt->CKTstate0 + here->BSIM4vsbs); delvses = vses - (*(ckt->CKTstate0 + here->BSIM4vses)); vdedo = *(ckt->CKTstate0 + here->BSIM4vdes) - *(ckt->CKTstate0 + here->BSIM4vds); delvdes = vdes - *(ckt->CKTstate0 + here->BSIM4vdes); delvded = vdes - vds - vdedo; delvbd_jct = (!here->BSIM4rbodyMod) ? delvbd : delvdbd; delvbs_jct = (!here->BSIM4rbodyMod) ? delvbs : delvsbs; if (here->BSIM4mode >= 0) { Idtot = here->BSIM4cd + here->BSIM4csub - here->BSIM4cbd + here->BSIM4Igidl; cdhat = Idtot - here->BSIM4gbd * delvbd_jct + (here->BSIM4gmbs + here->BSIM4gbbs + here->BSIM4ggidlb) * delvbs + (here->BSIM4gm + here->BSIM4gbgs + here->BSIM4ggidlg) * delvgs + (here->BSIM4gds + here->BSIM4gbds + here->BSIM4ggidld) * delvds; Ibtot = here->BSIM4cbs + here->BSIM4cbd - here->BSIM4Igidl - here->BSIM4Igisl - here->BSIM4csub; cbhat = Ibtot + here->BSIM4gbd * delvbd_jct + here->BSIM4gbs * delvbs_jct - (here->BSIM4gbbs + here->BSIM4ggidlb) * delvbs - (here->BSIM4gbgs + here->BSIM4ggidlg) * delvgs - (here->BSIM4gbds + here->BSIM4ggidld - here->BSIM4ggisls) * delvds - here->BSIM4ggislg * delvgd - here->BSIM4ggislb* delvbd; Igstot = here->BSIM4Igs + here->BSIM4Igcs; cgshat = Igstot + (here->BSIM4gIgsg + here->BSIM4gIgcsg) * delvgs + here->BSIM4gIgcsd * delvds + here->BSIM4gIgcsb * delvbs; Igdtot = here->BSIM4Igd + here->BSIM4Igcd; cgdhat = Igdtot + here->BSIM4gIgdg * delvgd + here->BSIM4gIgcdg * delvgs + here->BSIM4gIgcdd * delvds + here->BSIM4gIgcdb * delvbs; Igbtot = here->BSIM4Igb; cgbhat = here->BSIM4Igb + here->BSIM4gIgbg * delvgs + here->BSIM4gIgbd * delvds + here->BSIM4gIgbb * delvbs; } else { Idtot = here->BSIM4cd + here->BSIM4cbd - here->BSIM4Igidl; /* bugfix */ cdhat = Idtot + here->BSIM4gbd * delvbd_jct + here->BSIM4gmbs * delvbd + here->BSIM4gm * delvgd - (here->BSIM4gds + here->BSIM4ggidls) * delvds - here->BSIM4ggidlg * 
delvgs - here->BSIM4ggidlb * delvbs; Ibtot = here->BSIM4cbs + here->BSIM4cbd - here->BSIM4Igidl - here->BSIM4Igisl - here->BSIM4csub; cbhat = Ibtot + here->BSIM4gbs * delvbs_jct + here->BSIM4gbd * delvbd_jct - (here->BSIM4gbbs + here->BSIM4ggislb) * delvbd - (here->BSIM4gbgs + here->BSIM4ggislg) * delvgd + (here->BSIM4gbds + here->BSIM4ggisld - here->BSIM4ggidls) * delvds - here->BSIM4ggidlg * delvgs - here->BSIM4ggidlb * delvbs; Igstot = here->BSIM4Igs + here->BSIM4Igcd; cgshat = Igstot + here->BSIM4gIgsg * delvgs + here->BSIM4gIgcdg * delvgd - here->BSIM4gIgcdd * delvds + here->BSIM4gIgcdb * delvbd; Igdtot = here->BSIM4Igd + here->BSIM4Igcs; cgdhat = Igdtot + (here->BSIM4gIgdg + here->BSIM4gIgcsg) * delvgd - here->BSIM4gIgcsd * delvds + here->BSIM4gIgcsb * delvbd; Igbtot = here->BSIM4Igb; cgbhat = here->BSIM4Igb + here->BSIM4gIgbg * delvgd - here->BSIM4gIgbd * delvds + here->BSIM4gIgbb * delvbd; } Isestot = here->BSIM4gstot * (*(ckt->CKTstate0 + here->BSIM4vses)); cseshat = Isestot + here->BSIM4gstot * delvses + here->BSIM4gstotd * delvds + here->BSIM4gstotg * delvgs + here->BSIM4gstotb * delvbs; Idedtot = here->BSIM4gdtot * vdedo; cdedhat = Idedtot + here->BSIM4gdtot * delvded + here->BSIM4gdtotd * delvds + here->BSIM4gdtotg * delvgs + here->BSIM4gdtotb * delvbs; #ifndef NOBYPASS /* Following should be one IF statement, but some C compilers * can't handle that all at once, so we split it into several * successive IF's */ if ((!(ckt->CKTmode & MODEINITPRED)) && (ckt->CKTbypass)) if ((fabs(delvds) < (ckt->CKTreltol * MAX(fabs(vds), fabs(*(ckt->CKTstate0 + here->BSIM4vds))) + ckt->CKTvoltTol))) if ((fabs(delvgs) < (ckt->CKTreltol * MAX(fabs(vgs), fabs(*(ckt->CKTstate0 + here->BSIM4vgs))) + ckt->CKTvoltTol))) if ((fabs(delvbs) < (ckt->CKTreltol * MAX(fabs(vbs), fabs(*(ckt->CKTstate0 + here->BSIM4vbs))) + ckt->CKTvoltTol))) if ((fabs(delvbd) < (ckt->CKTreltol * MAX(fabs(vbd), fabs(*(ckt->CKTstate0 + here->BSIM4vbd))) + ckt->CKTvoltTol))) if ((here->BSIM4rgateMod == 
0) || (here->BSIM4rgateMod == 1) || (fabs(delvges) < (ckt->CKTreltol * MAX(fabs(vges), fabs(*(ckt->CKTstate0 + here->BSIM4vges))) + ckt->CKTvoltTol))) if ((here->BSIM4rgateMod != 3) || (fabs(delvgms) < (ckt->CKTreltol * MAX(fabs(vgms), fabs(*(ckt->CKTstate0 + here->BSIM4vgms))) + ckt->CKTvoltTol))) if ((!here->BSIM4rbodyMod) || (fabs(delvdbs) < (ckt->CKTreltol * MAX(fabs(vdbs), fabs(*(ckt->CKTstate0 + here->BSIM4vdbs))) + ckt->CKTvoltTol))) if ((!here->BSIM4rbodyMod) || (fabs(delvdbd) < (ckt->CKTreltol * MAX(fabs(vdbd), fabs(*(ckt->CKTstate0 + here->BSIM4vdbd))) + ckt->CKTvoltTol))) if ((!here->BSIM4rbodyMod) || (fabs(delvsbs) < (ckt->CKTreltol * MAX(fabs(vsbs), fabs(*(ckt->CKTstate0 + here->BSIM4vsbs))) + ckt->CKTvoltTol))) if ((!model->BSIM4rdsMod) || (fabs(delvses) < (ckt->CKTreltol * MAX(fabs(vses), fabs(*(ckt->CKTstate0 + here->BSIM4vses))) + ckt->CKTvoltTol))) if ((!model->BSIM4rdsMod) || (fabs(delvdes) < (ckt->CKTreltol * MAX(fabs(vdes), fabs(*(ckt->CKTstate0 + here->BSIM4vdes))) + ckt->CKTvoltTol))) if ((fabs(cdhat - Idtot) < ckt->CKTreltol * MAX(fabs(cdhat), fabs(Idtot)) + ckt->CKTabstol)) if ((fabs(cbhat - Ibtot) < ckt->CKTreltol * MAX(fabs(cbhat), fabs(Ibtot)) + ckt->CKTabstol)) if ((!model->BSIM4igcMod) || ((fabs(cgshat - Igstot) < ckt->CKTreltol * MAX(fabs(cgshat), fabs(Igstot)) + ckt->CKTabstol))) if ((!model->BSIM4igcMod) || ((fabs(cgdhat - Igdtot) < ckt->CKTreltol * MAX(fabs(cgdhat), fabs(Igdtot)) + ckt->CKTabstol))) if ((!model->BSIM4igbMod) || ((fabs(cgbhat - Igbtot) < ckt->CKTreltol * MAX(fabs(cgbhat), fabs(Igbtot)) + ckt->CKTabstol))) if ((!model->BSIM4rdsMod) || ((fabs(cseshat - Isestot) < ckt->CKTreltol * MAX(fabs(cseshat), fabs(Isestot)) + ckt->CKTabstol))) if ((!model->BSIM4rdsMod) || ((fabs(cdedhat - Idedtot) < ckt->CKTreltol * MAX(fabs(cdedhat), fabs(Idedtot)) + ckt->CKTabstol))) { vds = *(ckt->CKTstate0 + here->BSIM4vds); vgs = *(ckt->CKTstate0 + here->BSIM4vgs); vbs = *(ckt->CKTstate0 + here->BSIM4vbs); vges = *(ckt->CKTstate0 + 
here->BSIM4vges); vgms = *(ckt->CKTstate0 + here->BSIM4vgms); vbd = *(ckt->CKTstate0 + here->BSIM4vbd); vdbs = *(ckt->CKTstate0 + here->BSIM4vdbs); vdbd = *(ckt->CKTstate0 + here->BSIM4vdbd); vsbs = *(ckt->CKTstate0 + here->BSIM4vsbs); vses = *(ckt->CKTstate0 + here->BSIM4vses); vdes = *(ckt->CKTstate0 + here->BSIM4vdes); vgd = vgs - vds; vgb = vgs - vbs; vged = vges - vds; vgmd = vgms - vds; vgmb = vgms - vbs; vbs_jct = (!here->BSIM4rbodyMod) ? vbs : vsbs; vbd_jct = (!here->BSIM4rbodyMod) ? vbd : vdbd; /*** qdef should not be kept fixed even if vgs, vds & vbs has converged **** qdef = *(ckt->CKTstate0 + here->BSIM4qdef); ***/ cdrain = here->BSIM4cd; if ((ckt->CKTmode & (MODETRAN | MODEAC)) || ((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC))) { ByPass = 1; qgate = here->BSIM4qgate; qbulk = here->BSIM4qbulk; qdrn = here->BSIM4qdrn; cgdo = here->BSIM4cgdo; qgdo = here->BSIM4qgdo; cgso = here->BSIM4cgso; qgso = here->BSIM4qgso; goto line755; } else goto line850; } #endif /*NOBYPASS*/ von = here->BSIM4von; if (*(ckt->CKTstate0 + here->BSIM4vds) >= 0.0) { vgs = DEVfetlim(vgs, *(ckt->CKTstate0 + here->BSIM4vgs), von); vds = vgs - vgd; vds = DEVlimvds(vds, *(ckt->CKTstate0 + here->BSIM4vds)); vgd = vgs - vds; if (here->BSIM4rgateMod == 3) { vges = DEVfetlim(vges, *(ckt->CKTstate0 + here->BSIM4vges), von); vgms = DEVfetlim(vgms, *(ckt->CKTstate0 + here->BSIM4vgms), von); vged = vges - vds; vgmd = vgms - vds; } else if ((here->BSIM4rgateMod == 1) || (here->BSIM4rgateMod == 2)) { vges = DEVfetlim(vges, *(ckt->CKTstate0 + here->BSIM4vges), von); vged = vges - vds; } if (model->BSIM4rdsMod) { vdes = DEVlimvds(vdes, *(ckt->CKTstate0 + here->BSIM4vdes)); vses = -DEVlimvds(-vses, -(*(ckt->CKTstate0 + here->BSIM4vses))); } } else { vgd = DEVfetlim(vgd, vgdo, von); vds = vgs - vgd; vds = -DEVlimvds(-vds, -(*(ckt->CKTstate0 + here->BSIM4vds))); vgs = vgd + vds; if (here->BSIM4rgateMod == 3) { vged = DEVfetlim(vged, vgedo, von); vges = vged + vds; vgmd = DEVfetlim(vgmd, 
vgmdo, von); vgms = vgmd + vds; } if ((here->BSIM4rgateMod == 1) || (here->BSIM4rgateMod == 2)) { vged = DEVfetlim(vged, vgedo, von); vges = vged + vds; } if (model->BSIM4rdsMod) { vdes = -DEVlimvds(-vdes, -(*(ckt->CKTstate0 + here->BSIM4vdes))); vses = DEVlimvds(vses, *(ckt->CKTstate0 + here->BSIM4vses)); } } if (vds >= 0.0) { vbs = DEVpnjlim(vbs, *(ckt->CKTstate0 + here->BSIM4vbs), CONSTvt0, model->BSIM4vcrit, &Check); vbd = vbs - vds; if (here->BSIM4rbodyMod) { vdbs = DEVpnjlim(vdbs, *(ckt->CKTstate0 + here->BSIM4vdbs), CONSTvt0, model->BSIM4vcrit, &Check1); vdbd = vdbs - vds; vsbs = DEVpnjlim(vsbs, *(ckt->CKTstate0 + here->BSIM4vsbs), CONSTvt0, model->BSIM4vcrit, &Check2); if ((Check1 == 0) && (Check2 == 0)) Check = 0; else Check = 1; } } else { vbd = DEVpnjlim(vbd, *(ckt->CKTstate0 + here->BSIM4vbd), CONSTvt0, model->BSIM4vcrit, &Check); vbs = vbd + vds; if (here->BSIM4rbodyMod) { vdbd = DEVpnjlim(vdbd, *(ckt->CKTstate0 + here->BSIM4vdbd), CONSTvt0, model->BSIM4vcrit, &Check1); vdbs = vdbd + vds; vsbdo = *(ckt->CKTstate0 + here->BSIM4vsbs) - *(ckt->CKTstate0 + here->BSIM4vds); vsbd = vsbs - vds; vsbd = DEVpnjlim(vsbd, vsbdo, CONSTvt0, model->BSIM4vcrit, &Check2); vsbs = vsbd + vds; if ((Check1 == 0) && (Check2 == 0)) Check = 0; else Check = 1; } } } /* Calculate DC currents and their derivatives */ vbd = vbs - vds; vgd = vgs - vds; vgb = vgs - vbs; vged = vges - vds; vgmd = vgms - vds; vgmb = vgms - vbs; vdbd = vdbs - vds; vbs_jct = (!here->BSIM4rbodyMod) ? vbs : vsbs; vbd_jct = (!here->BSIM4rbodyMod) ? 
vbd : vdbd; /* Source/drain junction diode DC model begins */ Nvtms = model->BSIM4vtm * model->BSIM4SjctEmissionCoeff; /* if ((here->BSIM4Aseff <= 0.0) && (here->BSIM4Pseff <= 0.0)) { SourceSatCurrent = 1.0e-14; } v4.7 */ if ((here->BSIM4Aseff <= 0.0) && (here->BSIM4Pseff <= 0.0)) { SourceSatCurrent = 0.0; } else { SourceSatCurrent = here->BSIM4Aseff * model->BSIM4SjctTempSatCurDensity + here->BSIM4Pseff * model->BSIM4SjctSidewallTempSatCurDensity + pParam->BSIM4weffCJ * here->BSIM4nf * model->BSIM4SjctGateSidewallTempSatCurDensity; } if (SourceSatCurrent <= 0.0) { here->BSIM4gbs = ckt->CKTgmin; here->BSIM4cbs = here->BSIM4gbs * vbs_jct; } else { switch(model->BSIM4dioMod) { case 0: evbs = exp(vbs_jct / Nvtms); T1 = model->BSIM4xjbvs * exp(-(model->BSIM4bvs + vbs_jct) / Nvtms); /* WDLiu: Magic T1 in this form; different from BSIM4 beta. */ here->BSIM4gbs = SourceSatCurrent * (evbs + T1) / Nvtms + ckt->CKTgmin; here->BSIM4cbs = SourceSatCurrent * (evbs + here->BSIM4XExpBVS - T1 - 1.0) + ckt->CKTgmin * vbs_jct; break; case 1: T2 = vbs_jct / Nvtms; if (T2 < -EXP_THRESHOLD) { here->BSIM4gbs = ckt->CKTgmin; here->BSIM4cbs = SourceSatCurrent * (MIN_EXP - 1.0) + ckt->CKTgmin * vbs_jct; } else if (vbs_jct <= here->BSIM4vjsmFwd) { evbs = exp(T2); here->BSIM4gbs = SourceSatCurrent * evbs / Nvtms + ckt->CKTgmin; here->BSIM4cbs = SourceSatCurrent * (evbs - 1.0) + ckt->CKTgmin * vbs_jct; } else { T0 = here->BSIM4IVjsmFwd / Nvtms; here->BSIM4gbs = T0 + ckt->CKTgmin; here->BSIM4cbs = here->BSIM4IVjsmFwd - SourceSatCurrent + T0 * (vbs_jct - here->BSIM4vjsmFwd) + ckt->CKTgmin * vbs_jct; } break; case 2: if (vbs_jct < here->BSIM4vjsmRev) { T0 = vbs_jct / Nvtms; if (T0 < -EXP_THRESHOLD) { evbs = MIN_EXP; devbs_dvb = 0.0; } else { evbs = exp(T0); devbs_dvb = evbs / Nvtms; } T1 = evbs - 1.0; T2 = here->BSIM4IVjsmRev + here->BSIM4SslpRev * (vbs_jct - here->BSIM4vjsmRev); here->BSIM4gbs = devbs_dvb * T2 + T1 * here->BSIM4SslpRev + ckt->CKTgmin; here->BSIM4cbs = T1 * T2 + ckt->CKTgmin * 
vbs_jct; } else if (vbs_jct <= here->BSIM4vjsmFwd) { T0 = vbs_jct / Nvtms; if (T0 < -EXP_THRESHOLD) { evbs = MIN_EXP; devbs_dvb = 0.0; } else { evbs = exp(T0); devbs_dvb = evbs / Nvtms; } T1 = (model->BSIM4bvs + vbs_jct) / Nvtms; if (T1 > EXP_THRESHOLD) { T2 = MIN_EXP; T3 = 0.0; } else { T2 = exp(-T1); T3 = -T2 /Nvtms; } here->BSIM4gbs = SourceSatCurrent * (devbs_dvb - model->BSIM4xjbvs * T3) + ckt->CKTgmin; here->BSIM4cbs = SourceSatCurrent * (evbs + here->BSIM4XExpBVS - 1.0 - model->BSIM4xjbvs * T2) + ckt->CKTgmin * vbs_jct; } else { here->BSIM4gbs = here->BSIM4SslpFwd + ckt->CKTgmin; here->BSIM4cbs = here->BSIM4IVjsmFwd + here->BSIM4SslpFwd * (vbs_jct - here->BSIM4vjsmFwd) + ckt->CKTgmin * vbs_jct; } break; default: break; } } Nvtmd = model->BSIM4vtm * model->BSIM4DjctEmissionCoeff; /* if ((here->BSIM4Adeff <= 0.0) && (here->BSIM4Pdeff <= 0.0)) { DrainSatCurrent = 1.0e-14; } v4.7 */ if ((here->BSIM4Adeff <= 0.0) && (here->BSIM4Pdeff <= 0.0)) { DrainSatCurrent = 0.0; } else { DrainSatCurrent = here->BSIM4Adeff * model->BSIM4DjctTempSatCurDensity + here->BSIM4Pdeff * model->BSIM4DjctSidewallTempSatCurDensity + pParam->BSIM4weffCJ * here->BSIM4nf * model->BSIM4DjctGateSidewallTempSatCurDensity; } if (DrainSatCurrent <= 0.0) { here->BSIM4gbd = ckt->CKTgmin; here->BSIM4cbd = here->BSIM4gbd * vbd_jct; } else { switch(model->BSIM4dioMod) { case 0: evbd = exp(vbd_jct / Nvtmd); T1 = model->BSIM4xjbvd * exp(-(model->BSIM4bvd + vbd_jct) / Nvtmd); /* WDLiu: Magic T1 in this form; different from BSIM4 beta. 
*/ here->BSIM4gbd = DrainSatCurrent * (evbd + T1) / Nvtmd + ckt->CKTgmin; here->BSIM4cbd = DrainSatCurrent * (evbd + here->BSIM4XExpBVD - T1 - 1.0) + ckt->CKTgmin * vbd_jct; break; case 1: T2 = vbd_jct / Nvtmd; if (T2 < -EXP_THRESHOLD) { here->BSIM4gbd = ckt->CKTgmin; here->BSIM4cbd = DrainSatCurrent * (MIN_EXP - 1.0) + ckt->CKTgmin * vbd_jct; } else if (vbd_jct <= here->BSIM4vjdmFwd) { evbd = exp(T2); here->BSIM4gbd = DrainSatCurrent * evbd / Nvtmd + ckt->CKTgmin; here->BSIM4cbd = DrainSatCurrent * (evbd - 1.0) + ckt->CKTgmin * vbd_jct; } else { T0 = here->BSIM4IVjdmFwd / Nvtmd; here->BSIM4gbd = T0 + ckt->CKTgmin; here->BSIM4cbd = here->BSIM4IVjdmFwd - DrainSatCurrent + T0 * (vbd_jct - here->BSIM4vjdmFwd) + ckt->CKTgmin * vbd_jct; } break; case 2: if (vbd_jct < here->BSIM4vjdmRev) { T0 = vbd_jct / Nvtmd; if (T0 < -EXP_THRESHOLD) { evbd = MIN_EXP; devbd_dvb = 0.0; } else { evbd = exp(T0); devbd_dvb = evbd / Nvtmd; } T1 = evbd - 1.0; T2 = here->BSIM4IVjdmRev + here->BSIM4DslpRev * (vbd_jct - here->BSIM4vjdmRev); here->BSIM4gbd = devbd_dvb * T2 + T1 * here->BSIM4DslpRev + ckt->CKTgmin; here->BSIM4cbd = T1 * T2 + ckt->CKTgmin * vbd_jct; } else if (vbd_jct <= here->BSIM4vjdmFwd) { T0 = vbd_jct / Nvtmd; if (T0 < -EXP_THRESHOLD) { evbd = MIN_EXP; devbd_dvb = 0.0; } else { evbd = exp(T0); devbd_dvb = evbd / Nvtmd; } T1 = (model->BSIM4bvd + vbd_jct) / Nvtmd; if (T1 > EXP_THRESHOLD) { T2 = MIN_EXP; T3 = 0.0; } else { T2 = exp(-T1); T3 = -T2 /Nvtmd; } here->BSIM4gbd = DrainSatCurrent * (devbd_dvb - model->BSIM4xjbvd * T3) + ckt->CKTgmin; here->BSIM4cbd = DrainSatCurrent * (evbd + here->BSIM4XExpBVD - 1.0 - model->BSIM4xjbvd * T2) + ckt->CKTgmin * vbd_jct; } else { here->BSIM4gbd = here->BSIM4DslpFwd + ckt->CKTgmin; here->BSIM4cbd = here->BSIM4IVjdmFwd + here->BSIM4DslpFwd * (vbd_jct - here->BSIM4vjdmFwd) + ckt->CKTgmin * vbd_jct; } break; default: break; } } /* trap-assisted tunneling and recombination current for reverse bias */ Nvtmrssws = model->BSIM4vtm0 * 
model->BSIM4njtsswstemp; Nvtmrsswgs = model->BSIM4vtm0 * model->BSIM4njtsswgstemp; Nvtmrss = model->BSIM4vtm0 * model->BSIM4njtsstemp; Nvtmrsswd = model->BSIM4vtm0 * model->BSIM4njtsswdtemp; Nvtmrsswgd = model->BSIM4vtm0 * model->BSIM4njtsswgdtemp; Nvtmrsd = model->BSIM4vtm0 * model->BSIM4njtsdtemp; if ((model->BSIM4vtss - vbs_jct) < (model->BSIM4vtss * 1e-3)) { T9 = 1.0e3; T0 = - vbs_jct / Nvtmrss * T9; DEXP(T0, T1, T10); dT1_dVb = T10 / Nvtmrss * T9; } else { T9 = 1.0 / (model->BSIM4vtss - vbs_jct); T0 = -vbs_jct / Nvtmrss * model->BSIM4vtss * T9; dT0_dVb = model->BSIM4vtss / Nvtmrss * (T9 + vbs_jct * T9 * T9) ; DEXP(T0, T1, T10); dT1_dVb = T10 * dT0_dVb; } if ((model->BSIM4vtsd - vbd_jct) < (model->BSIM4vtsd * 1e-3) ) { T9 = 1.0e3; T0 = -vbd_jct / Nvtmrsd * T9; DEXP(T0, T2, T10); dT2_dVb = T10 / Nvtmrsd * T9; } else { T9 = 1.0 / (model->BSIM4vtsd - vbd_jct); T0 = -vbd_jct / Nvtmrsd * model->BSIM4vtsd * T9; dT0_dVb = model->BSIM4vtsd / Nvtmrsd * (T9 + vbd_jct * T9 * T9) ; DEXP(T0, T2, T10); dT2_dVb = T10 * dT0_dVb; } if ((model->BSIM4vtssws - vbs_jct) < (model->BSIM4vtssws * 1e-3) ) { T9 = 1.0e3; T0 = -vbs_jct / Nvtmrssws * T9; DEXP(T0, T3, T10); dT3_dVb = T10 / Nvtmrssws * T9; } else { T9 = 1.0 / (model->BSIM4vtssws - vbs_jct); T0 = -vbs_jct / Nvtmrssws * model->BSIM4vtssws * T9; dT0_dVb = model->BSIM4vtssws / Nvtmrssws * (T9 + vbs_jct * T9 * T9) ; DEXP(T0, T3, T10); dT3_dVb = T10 * dT0_dVb; } if ((model->BSIM4vtsswd - vbd_jct) < (model->BSIM4vtsswd * 1e-3) ) { T9 = 1.0e3; T0 = -vbd_jct / Nvtmrsswd * T9; DEXP(T0, T4, T10); dT4_dVb = T10 / Nvtmrsswd * T9; } else { T9 = 1.0 / (model->BSIM4vtsswd - vbd_jct); T0 = -vbd_jct / Nvtmrsswd * model->BSIM4vtsswd * T9; dT0_dVb = model->BSIM4vtsswd / Nvtmrsswd * (T9 + vbd_jct * T9 * T9) ; DEXP(T0, T4, T10); dT4_dVb = T10 * dT0_dVb; } if ((model->BSIM4vtsswgs - vbs_jct) < (model->BSIM4vtsswgs * 1e-3) ) { T9 = 1.0e3; T0 = -vbs_jct / Nvtmrsswgs * T9; DEXP(T0, T5, T10); dT5_dVb = T10 / Nvtmrsswgs * T9; } else { T9 = 1.0 / 
(model->BSIM4vtsswgs - vbs_jct); T0 = -vbs_jct / Nvtmrsswgs * model->BSIM4vtsswgs * T9; dT0_dVb = model->BSIM4vtsswgs / Nvtmrsswgs * (T9 + vbs_jct * T9 * T9) ; DEXP(T0, T5, T10); dT5_dVb = T10 * dT0_dVb; } if ((model->BSIM4vtsswgd - vbd_jct) < (model->BSIM4vtsswgd * 1e-3) ) { T9 = 1.0e3; T0 = -vbd_jct / Nvtmrsswgd * T9; DEXP(T0, T6, T10); dT6_dVb = T10 / Nvtmrsswgd * T9; } else { T9 = 1.0 / (model->BSIM4vtsswgd - vbd_jct); T0 = -vbd_jct / Nvtmrsswgd * model->BSIM4vtsswgd * T9; dT0_dVb = model->BSIM4vtsswgd / Nvtmrsswgd * (T9 + vbd_jct * T9 * T9) ; DEXP(T0, T6, T10); dT6_dVb = T10 * dT0_dVb; } here->BSIM4gbs += here->BSIM4SjctTempRevSatCur * dT1_dVb + here->BSIM4SswTempRevSatCur * dT3_dVb + here->BSIM4SswgTempRevSatCur * dT5_dVb; here->BSIM4cbs -= here->BSIM4SjctTempRevSatCur * (T1 - 1.0) + here->BSIM4SswTempRevSatCur * (T3 - 1.0) + here->BSIM4SswgTempRevSatCur * (T5 - 1.0); here->BSIM4gbd += here->BSIM4DjctTempRevSatCur * dT2_dVb + here->BSIM4DswTempRevSatCur * dT4_dVb + here->BSIM4DswgTempRevSatCur * dT6_dVb; here->BSIM4cbd -= here->BSIM4DjctTempRevSatCur * (T2 - 1.0) + here->BSIM4DswTempRevSatCur * (T4 - 1.0) + here->BSIM4DswgTempRevSatCur * (T6 - 1.0); /* End of diode DC model */ if (vds >= 0.0) { here->BSIM4mode = 1; Vds = vds; Vgs = vgs; Vbs = vbs; Vdb = vds - vbs; /* WDLiu: for GIDL */ } else { here->BSIM4mode = -1; Vds = -vds; Vgs = vgd; Vbs = vbd; Vdb = -vbs; } /* dunga */ if(model->BSIM4mtrlMod) { epsrox = 3.9; toxe = model->BSIM4eot; epssub = EPS0 * model->BSIM4epsrsub; } else { epsrox = model->BSIM4epsrox; toxe = model->BSIM4toxe; epssub = EPSSI; } T0 = Vbs - here->BSIM4vbsc - 0.001; T1 = sqrt(T0 * T0 - 0.004 * here->BSIM4vbsc); if (T0 >= 0.0) { Vbseff = here->BSIM4vbsc + 0.5 * (T0 + T1); dVbseff_dVb = 0.5 * (1.0 + T0 / T1); } else { T2 = -0.002 / (T1 - T0); Vbseff = here->BSIM4vbsc * (1.0 + T2); dVbseff_dVb = T2 * here->BSIM4vbsc / T1; } /* JX: Correction to forward body bias */ T9 = 0.95 * pParam->BSIM4phi; T0 = T9 - Vbseff - 0.001; T1 = sqrt(T0 * T0 + 
0.004 * T9); Vbseff = T9 - 0.5 * (T0 + T1); dVbseff_dVb *= 0.5 * (1.0 + T0 / T1); Phis = pParam->BSIM4phi - Vbseff; dPhis_dVb = -1.0; sqrtPhis = sqrt(Phis); dsqrtPhis_dVb = -0.5 / sqrtPhis; Xdep = pParam->BSIM4Xdep0 * sqrtPhis / pParam->BSIM4sqrtPhi; dXdep_dVb = (pParam->BSIM4Xdep0 / pParam->BSIM4sqrtPhi) * dsqrtPhis_dVb; Leff = pParam->BSIM4leff; Vtm = model->BSIM4vtm; Vtm0 = model->BSIM4vtm0; /* Vth Calculation */ T3 = sqrt(Xdep); V0 = pParam->BSIM4vbi - pParam->BSIM4phi; T0 = pParam->BSIM4dvt2 * Vbseff; if (T0 >= - 0.5) { T1 = 1.0 + T0; T2 = pParam->BSIM4dvt2; } else { T4 = 1.0 / (3.0 + 8.0 * T0); T1 = (1.0 + 3.0 * T0) * T4; T2 = pParam->BSIM4dvt2 * T4 * T4; } lt1 = model->BSIM4factor1 * T3 * T1; dlt1_dVb = model->BSIM4factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2); T0 = pParam->BSIM4dvt2w * Vbseff; if (T0 >= - 0.5) { T1 = 1.0 + T0; T2 = pParam->BSIM4dvt2w; } else { T4 = 1.0 / (3.0 + 8.0 * T0); T1 = (1.0 + 3.0 * T0) * T4; T2 = pParam->BSIM4dvt2w * T4 * T4; } ltw = model->BSIM4factor1 * T3 * T1; dltw_dVb = model->BSIM4factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2); T0 = pParam->BSIM4dvt1 * Leff / lt1; if (T0 < EXP_THRESHOLD) { T1 = exp(T0); T2 = T1 - 1.0; T3 = T2 * T2; T4 = T3 + 2.0 * T1 * MIN_EXP; Theta0 = T1 / T4; dT1_dVb = -T0 * T1 * dlt1_dVb / lt1; dTheta0_dVb = dT1_dVb * (T4 - 2.0 * T1 * (T2 + MIN_EXP)) / T4 / T4; } else { Theta0 = 1.0 / (MAX_EXP - 2.0); /* 3.0 * MIN_EXP omitted */ dTheta0_dVb = 0.0; } here->BSIM4thetavth = pParam->BSIM4dvt0 * Theta0; Delt_vth = here->BSIM4thetavth * V0; dDelt_vth_dVb = pParam->BSIM4dvt0 * dTheta0_dVb * V0; T0 = pParam->BSIM4dvt1w * pParam->BSIM4weff * Leff / ltw; if (T0 < EXP_THRESHOLD) { T1 = exp(T0); T2 = T1 - 1.0; T3 = T2 * T2; T4 = T3 + 2.0 * T1 * MIN_EXP; T5 = T1 / T4; dT1_dVb = -T0 * T1 * dltw_dVb / ltw; dT5_dVb = dT1_dVb * (T4 - 2.0 * T1 * (T2 + MIN_EXP)) / T4 / T4; } else { T5 = 1.0 / (MAX_EXP - 2.0); /* 3.0 * MIN_EXP omitted */ dT5_dVb = 0.0; } T0 = pParam->BSIM4dvt0w * T5; T2 = T0 * V0; dT2_dVb = 
pParam->BSIM4dvt0w * dT5_dVb * V0; TempRatio = ckt->CKTtemp / model->BSIM4tnom - 1.0; T0 = sqrt(1.0 + pParam->BSIM4lpe0 / Leff); T1 = pParam->BSIM4k1ox * (T0 - 1.0) * pParam->BSIM4sqrtPhi + (pParam->BSIM4kt1 + pParam->BSIM4kt1l / Leff + pParam->BSIM4kt2 * Vbseff) * TempRatio; Vth_NarrowW = toxe * pParam->BSIM4phi / (pParam->BSIM4weff + pParam->BSIM4w0); T3 = here->BSIM4eta0 + pParam->BSIM4etab * Vbseff; if (T3 < 1.0e-4) { T9 = 1.0 / (3.0 - 2.0e4 * T3); T3 = (2.0e-4 - T3) * T9; T4 = T9 * T9; } else { T4 = 1.0; } dDIBL_Sft_dVd = T3 * pParam->BSIM4theta0vb0; DIBL_Sft = dDIBL_Sft_dVd * Vds; Lpe_Vb = sqrt(1.0 + pParam->BSIM4lpeb / Leff); Vth = model->BSIM4type * here->BSIM4vth0 + (pParam->BSIM4k1ox * sqrtPhis - pParam->BSIM4k1 * pParam->BSIM4sqrtPhi) * Lpe_Vb - here->BSIM4k2ox * Vbseff - Delt_vth - T2 + (pParam->BSIM4k3 + pParam->BSIM4k3b * Vbseff) * Vth_NarrowW + T1 - DIBL_Sft; dVth_dVb = Lpe_Vb * pParam->BSIM4k1ox * dsqrtPhis_dVb - here->BSIM4k2ox - dDelt_vth_dVb - dT2_dVb + pParam->BSIM4k3b * Vth_NarrowW - pParam->BSIM4etab * Vds * pParam->BSIM4theta0vb0 * T4 + pParam->BSIM4kt2 * TempRatio; dVth_dVd = -dDIBL_Sft_dVd; /* Calculate n */ tmp1 = epssub / Xdep; here->BSIM4nstar = model->BSIM4vtm / Charge_q * (model->BSIM4coxe + tmp1 + pParam->BSIM4cit); tmp2 = pParam->BSIM4nfactor * tmp1; tmp3 = pParam->BSIM4cdsc + pParam->BSIM4cdscb * Vbseff + pParam->BSIM4cdscd * Vds; tmp4 = (tmp2 + tmp3 * Theta0 + pParam->BSIM4cit) / model->BSIM4coxe; if (tmp4 >= -0.5) { n = 1.0 + tmp4; dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb + pParam->BSIM4cdscb * Theta0) / model->BSIM4coxe; dn_dVd = pParam->BSIM4cdscd * Theta0 / model->BSIM4coxe; } else { T0 = 1.0 / (3.0 + 8.0 * tmp4); n = (1.0 + 3.0 * tmp4) * T0; T0 *= T0; dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb + pParam->BSIM4cdscb * Theta0) / model->BSIM4coxe * T0; dn_dVd = pParam->BSIM4cdscd * Theta0 / model->BSIM4coxe * T0; } /* Vth correction for Pocket implant */ if (pParam->BSIM4dvtp0 > 0.0) { T0 = 
-pParam->BSIM4dvtp1 * Vds; if (T0 < -EXP_THRESHOLD) { T2 = MIN_EXP; dT2_dVd = 0.0; } else { T2 = exp(T0); dT2_dVd = -pParam->BSIM4dvtp1 * T2; } T3 = Leff + pParam->BSIM4dvtp0 * (1.0 + T2); dT3_dVd = pParam->BSIM4dvtp0 * dT2_dVd; if (model->BSIM4tempMod < 2) { T4 = Vtm * log(Leff / T3); dT4_dVd = -Vtm * dT3_dVd / T3; } else { T4 = model->BSIM4vtm0 * log(Leff / T3); dT4_dVd = -model->BSIM4vtm0 * dT3_dVd / T3; } dDITS_Sft_dVd = dn_dVd * T4 + n * dT4_dVd; dDITS_Sft_dVb = T4 * dn_dVb; Vth -= n * T4; dVth_dVd -= dDITS_Sft_dVd; dVth_dVb -= dDITS_Sft_dVb; } /* v4.7 DITS_SFT2 */ if ((pParam->BSIM4dvtp4 == 0.0) || (pParam->BSIM4dvtp2factor == 0.0)) { T0 = 0.0; DITS_Sft2 = 0.0; } else { //T0 = exp(2.0 * pParam->BSIM4dvtp4 * Vds); /* beta code */ T1 = 2.0 * pParam->BSIM4dvtp4 * Vds; DEXP(T1, T0, T10); DITS_Sft2 = pParam->BSIM4dvtp2factor * (T0-1) / (T0+1); //dDITS_Sft2_dVd = pParam->BSIM4dvtp2factor * pParam->BSIM4dvtp4 * 4.0 * T0 / ((T0+1) * (T0+1)); /* beta code */ dDITS_Sft2_dVd = pParam->BSIM4dvtp2factor * pParam->BSIM4dvtp4 * 4.0 * T10 / ((T0+1) * (T0+1)); Vth -= DITS_Sft2; dVth_dVd -= dDITS_Sft2_dVd; } here->BSIM4von = Vth; /* Poly Gate Si Depletion Effect */ T0 = here->BSIM4vfb + pParam->BSIM4phi; if(model->BSIM4mtrlMod == 0) T1 = EPSSI; else T1 = model->BSIM4epsrgate * EPS0; BSIM4polyDepletion(T0, pParam->BSIM4ngate, T1, model->BSIM4coxe, vgs, &vgs_eff, &dvgs_eff_dvg); BSIM4polyDepletion(T0, pParam->BSIM4ngate, T1, model->BSIM4coxe, vgd, &vgd_eff, &dvgd_eff_dvg); if(here->BSIM4mode>0) { Vgs_eff = vgs_eff; dVgs_eff_dVg = dvgs_eff_dvg; } else { Vgs_eff = vgd_eff; dVgs_eff_dVg = dvgd_eff_dvg; } here->BSIM4vgs_eff = vgs_eff; here->BSIM4vgd_eff = vgd_eff; here->BSIM4dvgs_eff_dvg = dvgs_eff_dvg; here->BSIM4dvgd_eff_dvg = dvgd_eff_dvg; Vgst = Vgs_eff - Vth; /* Calculate Vgsteff */ T0 = n * Vtm; T1 = pParam->BSIM4mstar * Vgst; T2 = T1 / T0; if (T2 > EXP_THRESHOLD) { T10 = T1; dT10_dVg = pParam->BSIM4mstar * dVgs_eff_dVg; dT10_dVd = -dVth_dVd * pParam->BSIM4mstar; dT10_dVb = 
-dVth_dVb * pParam->BSIM4mstar; } else if (T2 < -EXP_THRESHOLD) { T10 = Vtm * log(1.0 + MIN_EXP); dT10_dVg = 0.0; dT10_dVd = T10 * dn_dVd; dT10_dVb = T10 * dn_dVb; T10 *= n; } else { ExpVgst = exp(T2); T3 = Vtm * log(1.0 + ExpVgst); T10 = n * T3; dT10_dVg = pParam->BSIM4mstar * ExpVgst / (1.0 + ExpVgst); dT10_dVb = T3 * dn_dVb - dT10_dVg * (dVth_dVb + Vgst * dn_dVb / n); dT10_dVd = T3 * dn_dVd - dT10_dVg * (dVth_dVd + Vgst * dn_dVd / n); dT10_dVg *= dVgs_eff_dVg; } T1 = pParam->BSIM4voffcbn - (1.0 - pParam->BSIM4mstar) * Vgst; T2 = T1 / T0; if (T2 < -EXP_THRESHOLD) { T3 = model->BSIM4coxe * MIN_EXP / pParam->BSIM4cdep0; T9 = pParam->BSIM4mstar + T3 * n; dT9_dVg = 0.0; dT9_dVd = dn_dVd * T3; dT9_dVb = dn_dVb * T3; } else if (T2 > EXP_THRESHOLD) { T3 = model->BSIM4coxe * MAX_EXP / pParam->BSIM4cdep0; T9 = pParam->BSIM4mstar + T3 * n; dT9_dVg = 0.0; dT9_dVd = dn_dVd * T3; dT9_dVb = dn_dVb * T3; } else { ExpVgst = exp(T2); T3 = model->BSIM4coxe / pParam->BSIM4cdep0; T4 = T3 * ExpVgst; T5 = T1 * T4 / T0; T9 = pParam->BSIM4mstar + n * T4; dT9_dVg = T3 * (pParam->BSIM4mstar - 1.0) * ExpVgst / Vtm; dT9_dVb = T4 * dn_dVb - dT9_dVg * dVth_dVb - T5 * dn_dVb; dT9_dVd = T4 * dn_dVd - dT9_dVg * dVth_dVd - T5 * dn_dVd; dT9_dVg *= dVgs_eff_dVg; } here->BSIM4Vgsteff = Vgsteff = T10 / T9; T11 = T9 * T9; dVgsteff_dVg = (T9 * dT10_dVg - T10 * dT9_dVg) / T11; dVgsteff_dVd = (T9 * dT10_dVd - T10 * dT9_dVd) / T11; dVgsteff_dVb = (T9 * dT10_dVb - T10 * dT9_dVb) / T11; /* Calculate Effective Channel Geometry */ T9 = sqrtPhis - pParam->BSIM4sqrtPhi; Weff = pParam->BSIM4weff - 2.0 * (pParam->BSIM4dwg * Vgsteff + pParam->BSIM4dwb * T9); dWeff_dVg = -2.0 * pParam->BSIM4dwg; dWeff_dVb = -2.0 * pParam->BSIM4dwb * dsqrtPhis_dVb; if (Weff < 2.0e-8) /* to avoid the discontinuity problem due to Weff*/ { T0 = 1.0 / (6.0e-8 - 2.0 * Weff); Weff = 2.0e-8 * (4.0e-8 - Weff) * T0; T0 *= T0 * 4.0e-16; dWeff_dVg *= T0; dWeff_dVb *= T0; } if (model->BSIM4rdsMod == 1) Rds = dRds_dVg = dRds_dVb = 0.0; else { T0 
= 1.0 + pParam->BSIM4prwg * Vgsteff; dT0_dVg = -pParam->BSIM4prwg / T0 / T0; T1 = pParam->BSIM4prwb * T9; dT1_dVb = pParam->BSIM4prwb * dsqrtPhis_dVb; T2 = 1.0 / T0 + T1; T3 = T2 + sqrt(T2 * T2 + 0.01); /* 0.01 = 4.0 * 0.05 * 0.05 */ dT3_dVg = 1.0 + T2 / (T3 - T2); dT3_dVb = dT3_dVg * dT1_dVb; dT3_dVg *= dT0_dVg; T4 = pParam->BSIM4rds0 * 0.5; Rds = pParam->BSIM4rdswmin + T3 * T4; dRds_dVg = T4 * dT3_dVg; dRds_dVb = T4 * dT3_dVb; if (Rds > 0.0) here->BSIM4grdsw = 1.0 / Rds* here->BSIM4nf; /*4.6.2*/ else here->BSIM4grdsw = 0.0; } /* Calculate Abulk */ T9 = 0.5 * pParam->BSIM4k1ox * Lpe_Vb / sqrtPhis; T1 = T9 + here->BSIM4k2ox - pParam->BSIM4k3b * Vth_NarrowW; dT1_dVb = -T9 / sqrtPhis * dsqrtPhis_dVb; T9 = sqrt(pParam->BSIM4xj * Xdep); tmp1 = Leff + 2.0 * T9; T5 = Leff / tmp1; tmp2 = pParam->BSIM4a0 * T5; tmp3 = pParam->BSIM4weff + pParam->BSIM4b1; tmp4 = pParam->BSIM4b0 / tmp3; T2 = tmp2 + tmp4; dT2_dVb = -T9 / tmp1 / Xdep * dXdep_dVb; T6 = T5 * T5; T7 = T5 * T6; Abulk0 = 1.0 + T1 * T2; dAbulk0_dVb = T1 * tmp2 * dT2_dVb + T2 * dT1_dVb; T8 = pParam->BSIM4ags * pParam->BSIM4a0 * T7; dAbulk_dVg = -T1 * T8; Abulk = Abulk0 + dAbulk_dVg * Vgsteff; dAbulk_dVb = dAbulk0_dVb - T8 * Vgsteff * (dT1_dVb + 3.0 * T1 * dT2_dVb); if (Abulk0 < 0.1) /* added to avoid the problems caused by Abulk0 */ { T9 = 1.0 / (3.0 - 20.0 * Abulk0); Abulk0 = (0.2 - Abulk0) * T9; dAbulk0_dVb *= T9 * T9; } if (Abulk < 0.1) { T9 = 1.0 / (3.0 - 20.0 * Abulk); Abulk = (0.2 - Abulk) * T9; T10 = T9 * T9; dAbulk_dVb *= T10; dAbulk_dVg *= T10; } here->BSIM4Abulk = Abulk; T2 = pParam->BSIM4keta * Vbseff; if (T2 >= -0.9) { T0 = 1.0 / (1.0 + T2); dT0_dVb = -pParam->BSIM4keta * T0 * T0; } else { T1 = 1.0 / (0.8 + T2); T0 = (17.0 + 20.0 * T2) * T1; dT0_dVb = -pParam->BSIM4keta * T1 * T1; } dAbulk_dVg *= T0; dAbulk_dVb = dAbulk_dVb * T0 + Abulk * dT0_dVb; dAbulk0_dVb = dAbulk0_dVb * T0 + Abulk0 * dT0_dVb; Abulk *= T0; Abulk0 *= T0; /* Mobility calculation */ if (model->BSIM4mtrlMod && model->BSIM4mtrlCompatMod == 
0) T14 = 2.0 * model->BSIM4type *(model->BSIM4phig - model->BSIM4easub - 0.5*model->BSIM4Eg0 + 0.45); else T14 = 0.0; if (model->BSIM4mobMod == 0) { T0 = Vgsteff + Vth + Vth - T14; T2 = pParam->BSIM4ua + pParam->BSIM4uc * Vbseff; T3 = T0 / toxe; T12 = sqrt(Vth * Vth + 0.0001); T9 = 1.0/(Vgsteff + 2*T12); T10 = T9*toxe; T8 = pParam->BSIM4ud * T10 * T10 * Vth; T6 = T8 * Vth; T5 = T3 * (T2 + pParam->BSIM4ub * T3) + T6; T7 = - 2.0 * T6 * T9; T11 = T7 * Vth/T12; dDenomi_dVg = (T2 + 2.0 * pParam->BSIM4ub * T3) / toxe; T13 = 2.0 * (dDenomi_dVg + T11 + T8); dDenomi_dVd = T13 * dVth_dVd; dDenomi_dVb = T13 * dVth_dVb + pParam->BSIM4uc * T3; dDenomi_dVg+= T7; } else if (model->BSIM4mobMod == 1) { T0 = Vgsteff + Vth + Vth - T14; T2 = 1.0 + pParam->BSIM4uc * Vbseff; T3 = T0 / toxe; T4 = T3 * (pParam->BSIM4ua + pParam->BSIM4ub * T3); T12 = sqrt(Vth * Vth + 0.0001); T9 = 1.0/(Vgsteff + 2*T12); T10 = T9*toxe; T8 = pParam->BSIM4ud * T10 * T10 * Vth; T6 = T8 * Vth; T5 = T4 * T2 + T6; T7 = - 2.0 * T6 * T9; T11 = T7 * Vth/T12; dDenomi_dVg = (pParam->BSIM4ua + 2.0 * pParam->BSIM4ub * T3) * T2 / toxe; T13 = 2.0 * (dDenomi_dVg + T11 + T8); dDenomi_dVd = T13 * dVth_dVd; dDenomi_dVb = T13 * dVth_dVb + pParam->BSIM4uc * T4; dDenomi_dVg+= T7; } else if (model->BSIM4mobMod == 2) { T0 = (Vgsteff + here->BSIM4vtfbphi1) / toxe; T1 = exp(pParam->BSIM4eu * log(T0)); dT1_dVg = T1 * pParam->BSIM4eu / T0 / toxe; T2 = pParam->BSIM4ua + pParam->BSIM4uc * Vbseff; T12 = sqrt(Vth * Vth + 0.0001); T9 = 1.0/(Vgsteff + 2*T12); T10 = T9*toxe; T8 = pParam->BSIM4ud * T10 * T10 * Vth; T6 = T8 * Vth; T5 = T1 * T2 + T6; T7 = - 2.0 * T6 * T9; T11 = T7 * Vth/T12; dDenomi_dVg = T2 * dT1_dVg + T7; T13 = 2.0 * (T11 + T8); dDenomi_dVd = T13 * dVth_dVd; dDenomi_dVb = T13 * dVth_dVb + T1 * pParam->BSIM4uc; } else if (model->BSIM4mobMod == 4) /* Synopsys 08/30/2013 add */ { T0 = Vgsteff + here->BSIM4vtfbphi1 - T14; T2 = pParam->BSIM4ua + pParam->BSIM4uc * Vbseff; T3 = T0 / toxe; T12 = 
sqrt(here->BSIM4vtfbphi1*here->BSIM4vtfbphi1 + 0.0001); T9 = 1.0/(Vgsteff + 2*T12); T10 = T9*toxe; T8 = pParam->BSIM4ud * T10 * T10 * here->BSIM4vtfbphi1; T6 = T8 * here->BSIM4vtfbphi1; T5 = T3 * (T2 + pParam->BSIM4ub * T3) + T6; T7 = - 2.0 * T6 * T9; dDenomi_dVg = (T2 + 2.0 * pParam->BSIM4ub * T3) / toxe; dDenomi_dVd = 0.0; dDenomi_dVb = pParam->BSIM4uc * T3; dDenomi_dVg+= T7; } else if (model->BSIM4mobMod == 5) /* Synopsys 08/30/2013 add */ { T0 = Vgsteff + here->BSIM4vtfbphi1 - T14; T2 = 1.0 + pParam->BSIM4uc * Vbseff; T3 = T0 / toxe; T4 = T3 * (pParam->BSIM4ua + pParam->BSIM4ub * T3); T12 = sqrt(here->BSIM4vtfbphi1 * here->BSIM4vtfbphi1 + 0.0001); T9 = 1.0/(Vgsteff + 2*T12); T10 = T9*toxe; T8 = pParam->BSIM4ud * T10 * T10 * here->BSIM4vtfbphi1; T6 = T8 * here->BSIM4vtfbphi1; T5 = T4 * T2 + T6; T7 = - 2.0 * T6 * T9; dDenomi_dVg = (pParam->BSIM4ua + 2.0 * pParam->BSIM4ub * T3) * T2 / toxe; dDenomi_dVd = 0.0; dDenomi_dVb = pParam->BSIM4uc * T4; dDenomi_dVg+= T7; } else if (model->BSIM4mobMod == 6) /* Synopsys 08/30/2013 modify */ { T0 = (Vgsteff + here->BSIM4vtfbphi1) / toxe; T1 = exp(pParam->BSIM4eu * log(T0)); dT1_dVg = T1 * pParam->BSIM4eu / T0 / toxe; T2 = pParam->BSIM4ua + pParam->BSIM4uc * Vbseff; T12 = sqrt(here->BSIM4vtfbphi1 * here->BSIM4vtfbphi1 + 0.0001); T9 = 1.0/(Vgsteff + 2*T12); T10 = T9*toxe; T8 = pParam->BSIM4ud * T10 * T10 * here->BSIM4vtfbphi1; T6 = T8 * here->BSIM4vtfbphi1; T5 = T1 * T2 + T6; T7 = - 2.0 * T6 * T9; dDenomi_dVg = T2 * dT1_dVg + T7; dDenomi_dVd = 0; dDenomi_dVb = T1 * pParam->BSIM4uc; } /*high K mobility*/ else { /*univsersal mobility*/ T0 = (Vgsteff + here->BSIM4vtfbphi1)* 1.0e-8 / toxe/6.0; T1 = exp(pParam->BSIM4eu * log(T0)); dT1_dVg = T1 * pParam->BSIM4eu * 1.0e-8/ T0 / toxe/6.0; T2 = pParam->BSIM4ua + pParam->BSIM4uc * Vbseff; /*Coulombic*/ VgsteffVth = pParam->BSIM4VgsteffVth; T10 = exp(pParam->BSIM4ucs * log(0.5 + 0.5 * Vgsteff/VgsteffVth)); T11 = pParam->BSIM4ud/T10; dT11_dVg = - 0.5 * pParam->BSIM4ucs * T11 /(0.5 + 
0.5*Vgsteff/VgsteffVth)/VgsteffVth; dDenomi_dVg = T2 * dT1_dVg + dT11_dVg; dDenomi_dVd = 0.0; dDenomi_dVb = T1 * pParam->BSIM4uc; T5 = T1 * T2 + T11; } if (T5 >= -0.8) { Denomi = 1.0 + T5; } else { T9 = 1.0 / (7.0 + 10.0 * T5); Denomi = (0.6 + T5) * T9; T9 *= T9; dDenomi_dVg *= T9; dDenomi_dVd *= T9; dDenomi_dVb *= T9; } here->BSIM4ueff = ueff = here->BSIM4u0temp / Denomi; T9 = -ueff / Denomi; dueff_dVg = T9 * dDenomi_dVg; dueff_dVd = T9 * dDenomi_dVd; dueff_dVb = T9 * dDenomi_dVb; /* Saturation Drain Voltage Vdsat */ WVCox = Weff * here->BSIM4vsattemp * model->BSIM4coxe; WVCoxRds = WVCox * Rds; Esat = 2.0 * here->BSIM4vsattemp / ueff; here->BSIM4EsatL = EsatL = Esat * Leff; T0 = -EsatL /ueff; dEsatL_dVg = T0 * dueff_dVg; dEsatL_dVd = T0 * dueff_dVd; dEsatL_dVb = T0 * dueff_dVb; /* Sqrt() */ a1 = pParam->BSIM4a1; if (a1 == 0.0) { Lambda = pParam->BSIM4a2; dLambda_dVg = 0.0; } else if (a1 > 0.0) { T0 = 1.0 - pParam->BSIM4a2; T1 = T0 - pParam->BSIM4a1 * Vgsteff - 0.0001; T2 = sqrt(T1 * T1 + 0.0004 * T0); Lambda = pParam->BSIM4a2 + T0 - 0.5 * (T1 + T2); dLambda_dVg = 0.5 * pParam->BSIM4a1 * (1.0 + T1 / T2); } else { T1 = pParam->BSIM4a2 + pParam->BSIM4a1 * Vgsteff - 0.0001; T2 = sqrt(T1 * T1 + 0.0004 * pParam->BSIM4a2); Lambda = 0.5 * (T1 + T2); dLambda_dVg = 0.5 * pParam->BSIM4a1 * (1.0 + T1 / T2); } Vgst2Vtm = Vgsteff + 2.0 * Vtm; if (Rds > 0) { tmp2 = dRds_dVg / Rds + dWeff_dVg / Weff; tmp3 = dRds_dVb / Rds + dWeff_dVb / Weff; } else { tmp2 = dWeff_dVg / Weff; tmp3 = dWeff_dVb / Weff; } if ((Rds == 0.0) && (Lambda == 1.0)) { T0 = 1.0 / (Abulk * EsatL + Vgst2Vtm); tmp1 = 0.0; T1 = T0 * T0; T2 = Vgst2Vtm * T0; T3 = EsatL * Vgst2Vtm; Vdsat = T3 * T0; dT0_dVg = -(Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 1.0) * T1; dT0_dVd = -(Abulk * dEsatL_dVd) * T1; dT0_dVb = -(Abulk * dEsatL_dVb + dAbulk_dVb * EsatL) * T1; dVdsat_dVg = T3 * dT0_dVg + T2 * dEsatL_dVg + EsatL * T0; dVdsat_dVd = T3 * dT0_dVd + T2 * dEsatL_dVd; dVdsat_dVb = T3 * dT0_dVb + T2 * dEsatL_dVb; } else { tmp1 
= dLambda_dVg / (Lambda * Lambda); T9 = Abulk * WVCoxRds; T8 = Abulk * T9; T7 = Vgst2Vtm * T9; T6 = Vgst2Vtm * WVCoxRds; T0 = 2.0 * Abulk * (T9 - 1.0 + 1.0 / Lambda); dT0_dVg = 2.0 * (T8 * tmp2 - Abulk * tmp1 + (2.0 * T9 + 1.0 / Lambda - 1.0) * dAbulk_dVg); dT0_dVb = 2.0 * (T8 * (2.0 / Abulk * dAbulk_dVb + tmp3) + (1.0 / Lambda - 1.0) * dAbulk_dVb); dT0_dVd = 0.0; T1 = Vgst2Vtm * (2.0 / Lambda - 1.0) + Abulk * EsatL + 3.0 * T7; dT1_dVg = (2.0 / Lambda - 1.0) - 2.0 * Vgst2Vtm * tmp1 + Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 3.0 * (T9 + T7 * tmp2 + T6 * dAbulk_dVg); dT1_dVb = Abulk * dEsatL_dVb + EsatL * dAbulk_dVb + 3.0 * (T6 * dAbulk_dVb + T7 * tmp3); dT1_dVd = Abulk * dEsatL_dVd; T2 = Vgst2Vtm * (EsatL + 2.0 * T6); dT2_dVg = EsatL + Vgst2Vtm * dEsatL_dVg + T6 * (4.0 + 2.0 * Vgst2Vtm * tmp2); dT2_dVb = Vgst2Vtm * (dEsatL_dVb + 2.0 * T6 * tmp3); dT2_dVd = Vgst2Vtm * dEsatL_dVd; T3 = sqrt(T1 * T1 - 2.0 * T0 * T2); Vdsat = (T1 - T3) / T0; dT3_dVg = (T1 * dT1_dVg - 2.0 * (T0 * dT2_dVg + T2 * dT0_dVg)) / T3; dT3_dVd = (T1 * dT1_dVd - 2.0 * (T0 * dT2_dVd + T2 * dT0_dVd)) / T3; dT3_dVb = (T1 * dT1_dVb - 2.0 * (T0 * dT2_dVb + T2 * dT0_dVb)) / T3; dVdsat_dVg = (dT1_dVg - (T1 * dT1_dVg - dT0_dVg * T2 - T0 * dT2_dVg) / T3 - Vdsat * dT0_dVg) / T0; dVdsat_dVb = (dT1_dVb - (T1 * dT1_dVb - dT0_dVb * T2 - T0 * dT2_dVb) / T3 - Vdsat * dT0_dVb) / T0; dVdsat_dVd = (dT1_dVd - (T1 * dT1_dVd - T0 * dT2_dVd) / T3) / T0; } here->BSIM4vdsat = Vdsat; /* Calculate Vdseff */ T1 = Vdsat - Vds - pParam->BSIM4delta; dT1_dVg = dVdsat_dVg; dT1_dVd = dVdsat_dVd - 1.0; dT1_dVb = dVdsat_dVb; T2 = sqrt(T1 * T1 + 4.0 * pParam->BSIM4delta * Vdsat); T0 = T1 / T2; T9 = 2.0 * pParam->BSIM4delta; T3 = T9 / T2; dT2_dVg = T0 * dT1_dVg + T3 * dVdsat_dVg; dT2_dVd = T0 * dT1_dVd + T3 * dVdsat_dVd; dT2_dVb = T0 * dT1_dVb + T3 * dVdsat_dVb; if (T1 >= 0.0) { Vdseff = Vdsat - 0.5 * (T1 + T2); dVdseff_dVg = dVdsat_dVg - 0.5 * (dT1_dVg + dT2_dVg); dVdseff_dVd = dVdsat_dVd - 0.5 * (dT1_dVd + dT2_dVd); dVdseff_dVb = 
dVdsat_dVb - 0.5 * (dT1_dVb + dT2_dVb); } else { T4 = T9 / (T2 - T1); T5 = 1.0 - T4; T6 = Vdsat * T4 / (T2 - T1); Vdseff = Vdsat * T5; dVdseff_dVg = dVdsat_dVg * T5 + T6 * (dT2_dVg - dT1_dVg); dVdseff_dVd = dVdsat_dVd * T5 + T6 * (dT2_dVd - dT1_dVd); dVdseff_dVb = dVdsat_dVb * T5 + T6 * (dT2_dVb - dT1_dVb); } if (Vds == 0.0) { Vdseff = 0.0; dVdseff_dVg = 0.0; dVdseff_dVb = 0.0; } if (Vdseff > Vds) Vdseff = Vds; diffVds = Vds - Vdseff; here->BSIM4Vdseff = Vdseff; /* Velocity Overshoot */ if((model->BSIM4lambdaGiven) && (model->BSIM4lambda > 0.0) ) { T1 = Leff * ueff; T2 = pParam->BSIM4lambda / T1; T3 = -T2 / T1 * Leff; dT2_dVd = T3 * dueff_dVd; dT2_dVg = T3 * dueff_dVg; dT2_dVb = T3 * dueff_dVb; T5 = 1.0 / (Esat * pParam->BSIM4litl); T4 = -T5 / EsatL; dT5_dVg = dEsatL_dVg * T4; dT5_dVd = dEsatL_dVd * T4; dT5_dVb = dEsatL_dVb * T4; T6 = 1.0 + diffVds * T5; dT6_dVg = dT5_dVg * diffVds - dVdseff_dVg * T5; dT6_dVd = dT5_dVd * diffVds + (1.0 - dVdseff_dVd) * T5; dT6_dVb = dT5_dVb * diffVds - dVdseff_dVb * T5; T7 = 2.0 / (T6 * T6 + 1.0); T8 = 1.0 - T7; T9 = T6 * T7 * T7; dT8_dVg = T9 * dT6_dVg; dT8_dVd = T9 * dT6_dVd; dT8_dVb = T9 * dT6_dVb; T10 = 1.0 + T2 * T8; dT10_dVg = dT2_dVg * T8 + T2 * dT8_dVg; dT10_dVd = dT2_dVd * T8 + T2 * dT8_dVd; dT10_dVb = dT2_dVb * T8 + T2 * dT8_dVb; if(T10 == 1.0) dT10_dVg = dT10_dVd = dT10_dVb = 0.0; dEsatL_dVg *= T10; dEsatL_dVg += EsatL * dT10_dVg; dEsatL_dVd *= T10; dEsatL_dVd += EsatL * dT10_dVd; dEsatL_dVb *= T10; dEsatL_dVb += EsatL * dT10_dVb; EsatL *= T10; Esat = EsatL / Leff; /* bugfix by Wenwei Yang (4.6.4) */ here->BSIM4EsatL = EsatL; } /* Calculate Vasat */ tmp4 = 1.0 - 0.5 * Abulk * Vdsat / Vgst2Vtm; T9 = WVCoxRds * Vgsteff; T8 = T9 / Vgst2Vtm; T0 = EsatL + Vdsat + 2.0 * T9 * tmp4; T7 = 2.0 * WVCoxRds * tmp4; dT0_dVg = dEsatL_dVg + dVdsat_dVg + T7 * (1.0 + tmp2 * Vgsteff) - T8 * (Abulk * dVdsat_dVg - Abulk * Vdsat / Vgst2Vtm + Vdsat * dAbulk_dVg); dT0_dVb = dEsatL_dVb + dVdsat_dVb + T7 * tmp3 * Vgsteff - T8 * (dAbulk_dVb * 
Vdsat + Abulk * dVdsat_dVb); dT0_dVd = dEsatL_dVd + dVdsat_dVd - T8 * Abulk * dVdsat_dVd; T9 = WVCoxRds * Abulk; T1 = 2.0 / Lambda - 1.0 + T9; dT1_dVg = -2.0 * tmp1 + WVCoxRds * (Abulk * tmp2 + dAbulk_dVg); dT1_dVb = dAbulk_dVb * WVCoxRds + T9 * tmp3; Vasat = T0 / T1; dVasat_dVg = (dT0_dVg - Vasat * dT1_dVg) / T1; dVasat_dVb = (dT0_dVb - Vasat * dT1_dVb) / T1; dVasat_dVd = dT0_dVd / T1; /* Calculate Idl first */ tmp1 = here->BSIM4vtfbphi2; tmp2 = 2.0e8 * here->BSIM4toxp; dT0_dVg = 1.0 / tmp2; T0 = (Vgsteff + tmp1) * dT0_dVg; tmp3 = exp(model->BSIM4bdos * 0.7 * log(T0)); T1 = 1.0 + tmp3; T2 = model->BSIM4bdos * 0.7 * tmp3 / T0; Tcen = model->BSIM4ados * 1.9e-9 / T1; dTcen_dVg = -Tcen * T2 * dT0_dVg / T1; Coxeff = epssub * here->BSIM4coxp / (epssub + here->BSIM4coxp * Tcen); here->BSIM4Coxeff = Coxeff; dCoxeff_dVg = -Coxeff * Coxeff * dTcen_dVg / epssub; CoxeffWovL = Coxeff * Weff / Leff; beta = ueff * CoxeffWovL; T3 = ueff / Leff; dbeta_dVg = CoxeffWovL * dueff_dVg + T3 * (Weff * dCoxeff_dVg + Coxeff * dWeff_dVg); dbeta_dVd = CoxeffWovL * dueff_dVd; dbeta_dVb = CoxeffWovL * dueff_dVb + T3 * Coxeff * dWeff_dVb; here->BSIM4AbovVgst2Vtm = Abulk / Vgst2Vtm; T0 = 1.0 - 0.5 * Vdseff * here->BSIM4AbovVgst2Vtm; dT0_dVg = -0.5 * (Abulk * dVdseff_dVg - Abulk * Vdseff / Vgst2Vtm + Vdseff * dAbulk_dVg) / Vgst2Vtm; dT0_dVd = -0.5 * Abulk * dVdseff_dVd / Vgst2Vtm; dT0_dVb = -0.5 * (Abulk * dVdseff_dVb + dAbulk_dVb * Vdseff) / Vgst2Vtm; fgche1 = Vgsteff * T0; dfgche1_dVg = Vgsteff * dT0_dVg + T0; dfgche1_dVd = Vgsteff * dT0_dVd; dfgche1_dVb = Vgsteff * dT0_dVb; T9 = Vdseff / EsatL; fgche2 = 1.0 + T9; dfgche2_dVg = (dVdseff_dVg - T9 * dEsatL_dVg) / EsatL; dfgche2_dVd = (dVdseff_dVd - T9 * dEsatL_dVd) / EsatL; dfgche2_dVb = (dVdseff_dVb - T9 * dEsatL_dVb) / EsatL; gche = beta * fgche1 / fgche2; dgche_dVg = (beta * dfgche1_dVg + fgche1 * dbeta_dVg - gche * dfgche2_dVg) / fgche2; dgche_dVd = (beta * dfgche1_dVd + fgche1 * dbeta_dVd - gche * dfgche2_dVd) / fgche2; dgche_dVb = (beta * 
dfgche1_dVb + fgche1 * dbeta_dVb - gche * dfgche2_dVb) / fgche2; T0 = 1.0 + gche * Rds; Idl = gche / T0; T1 = (1.0 - Idl * Rds) / T0; T2 = Idl * Idl; dIdl_dVg = T1 * dgche_dVg - T2 * dRds_dVg; dIdl_dVd = T1 * dgche_dVd; dIdl_dVb = T1 * dgche_dVb - T2 * dRds_dVb; /* Calculate degradation factor due to pocket implant */ if (pParam->BSIM4fprout <= 0.0) { FP = 1.0; dFP_dVg = 0.0; } else { T9 = pParam->BSIM4fprout * sqrt(Leff) / Vgst2Vtm; FP = 1.0 / (1.0 + T9); dFP_dVg = FP * FP * T9 / Vgst2Vtm; } /* Calculate VACLM */ T8 = pParam->BSIM4pvag / EsatL; T9 = T8 * Vgsteff; if (T9 > -0.9) { PvagTerm = 1.0 + T9; dPvagTerm_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL); dPvagTerm_dVb = -T9 * dEsatL_dVb / EsatL; dPvagTerm_dVd = -T9 * dEsatL_dVd / EsatL; } else { T4 = 1.0 / (17.0 + 20.0 * T9); PvagTerm = (0.8 + T9) * T4; T4 *= T4; dPvagTerm_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL) * T4; T9 *= T4 / EsatL; dPvagTerm_dVb = -T9 * dEsatL_dVb; dPvagTerm_dVd = -T9 * dEsatL_dVd; } if ((pParam->BSIM4pclm > MIN_EXP) && (diffVds > 1.0e-10)) { T0 = 1.0 + Rds * Idl; dT0_dVg = dRds_dVg * Idl + Rds * dIdl_dVg; dT0_dVd = Rds * dIdl_dVd; dT0_dVb = dRds_dVb * Idl + Rds * dIdl_dVb; T2 = Vdsat / Esat; T1 = Leff + T2; dT1_dVg = (dVdsat_dVg - T2 * dEsatL_dVg / Leff) / Esat; dT1_dVd = (dVdsat_dVd - T2 * dEsatL_dVd / Leff) / Esat; dT1_dVb = (dVdsat_dVb - T2 * dEsatL_dVb / Leff) / Esat; Cclm = FP * PvagTerm * T0 * T1 / (pParam->BSIM4pclm * pParam->BSIM4litl); dCclm_dVg = Cclm * (dFP_dVg / FP + dPvagTerm_dVg / PvagTerm + dT0_dVg / T0 + dT1_dVg / T1); dCclm_dVb = Cclm * (dPvagTerm_dVb / PvagTerm + dT0_dVb / T0 + dT1_dVb / T1); dCclm_dVd = Cclm * (dPvagTerm_dVd / PvagTerm + dT0_dVd / T0 + dT1_dVd / T1); VACLM = Cclm * diffVds; dVACLM_dVg = dCclm_dVg * diffVds - dVdseff_dVg * Cclm; dVACLM_dVb = dCclm_dVb * diffVds - dVdseff_dVb * Cclm; dVACLM_dVd = dCclm_dVd * diffVds + (1.0 - dVdseff_dVd) * Cclm; } else { VACLM = Cclm = MAX_EXP; dVACLM_dVd = dVACLM_dVg = dVACLM_dVb = 0.0; dCclm_dVd = dCclm_dVg = 
dCclm_dVb = 0.0; } /* Calculate VADIBL */ if (pParam->BSIM4thetaRout > MIN_EXP) { T8 = Abulk * Vdsat; T0 = Vgst2Vtm * T8; dT0_dVg = Vgst2Vtm * Abulk * dVdsat_dVg + T8 + Vgst2Vtm * Vdsat * dAbulk_dVg; dT0_dVb = Vgst2Vtm * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb); dT0_dVd = Vgst2Vtm * Abulk * dVdsat_dVd; T1 = Vgst2Vtm + T8; dT1_dVg = 1.0 + Abulk * dVdsat_dVg + Vdsat * dAbulk_dVg; dT1_dVb = Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat; dT1_dVd = Abulk * dVdsat_dVd; T9 = T1 * T1; T2 = pParam->BSIM4thetaRout; VADIBL = (Vgst2Vtm - T0 / T1) / T2; dVADIBL_dVg = (1.0 - dT0_dVg / T1 + T0 * dT1_dVg / T9) / T2; dVADIBL_dVb = (-dT0_dVb / T1 + T0 * dT1_dVb / T9) / T2; dVADIBL_dVd = (-dT0_dVd / T1 + T0 * dT1_dVd / T9) / T2; T7 = pParam->BSIM4pdiblb * Vbseff; if (T7 >= -0.9) { T3 = 1.0 / (1.0 + T7); VADIBL *= T3; dVADIBL_dVg *= T3; dVADIBL_dVb = (dVADIBL_dVb - VADIBL * pParam->BSIM4pdiblb) * T3; dVADIBL_dVd *= T3; } else { T4 = 1.0 / (0.8 + T7); T3 = (17.0 + 20.0 * T7) * T4; dVADIBL_dVg *= T3; dVADIBL_dVb = dVADIBL_dVb * T3 - VADIBL * pParam->BSIM4pdiblb * T4 * T4; dVADIBL_dVd *= T3; VADIBL *= T3; } dVADIBL_dVg = dVADIBL_dVg * PvagTerm + VADIBL * dPvagTerm_dVg; dVADIBL_dVb = dVADIBL_dVb * PvagTerm + VADIBL * dPvagTerm_dVb; dVADIBL_dVd = dVADIBL_dVd * PvagTerm + VADIBL * dPvagTerm_dVd; VADIBL *= PvagTerm; } else { VADIBL = MAX_EXP; dVADIBL_dVd = dVADIBL_dVg = dVADIBL_dVb = 0.0; } /* Calculate Va */ Va = Vasat + VACLM; dVa_dVg = dVasat_dVg + dVACLM_dVg; dVa_dVb = dVasat_dVb + dVACLM_dVb; dVa_dVd = dVasat_dVd + dVACLM_dVd; /* Calculate VADITS */ T0 = pParam->BSIM4pditsd * Vds; if (T0 > EXP_THRESHOLD) { T1 = MAX_EXP; dT1_dVd = 0; } else { T1 = exp(T0); dT1_dVd = T1 * pParam->BSIM4pditsd; } if (pParam->BSIM4pdits > MIN_EXP) { T2 = 1.0 + model->BSIM4pditsl * Leff; VADITS = (1.0 + T2 * T1) / pParam->BSIM4pdits; dVADITS_dVg = VADITS * dFP_dVg; dVADITS_dVd = FP * T2 * dT1_dVd / pParam->BSIM4pdits; VADITS *= FP; } else { VADITS = MAX_EXP; dVADITS_dVg = dVADITS_dVd = 0; } /* Calculate VASCBE */ 
if ((pParam->BSIM4pscbe2 > 0.0)&&(pParam->BSIM4pscbe1>=0.0)) /*4.6.2*/ { if (diffVds > pParam->BSIM4pscbe1 * pParam->BSIM4litl / EXP_THRESHOLD) { T0 = pParam->BSIM4pscbe1 * pParam->BSIM4litl / diffVds; VASCBE = Leff * exp(T0) / pParam->BSIM4pscbe2; T1 = T0 * VASCBE / diffVds; dVASCBE_dVg = T1 * dVdseff_dVg; dVASCBE_dVd = -T1 * (1.0 - dVdseff_dVd); dVASCBE_dVb = T1 * dVdseff_dVb; } else { VASCBE = MAX_EXP * Leff/pParam->BSIM4pscbe2; dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0; } } else { VASCBE = MAX_EXP; dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0; } /* Add DIBL to Ids */ T9 = diffVds / VADIBL; T0 = 1.0 + T9; Idsa = Idl * T0; dIdsa_dVg = T0 * dIdl_dVg - Idl * (dVdseff_dVg + T9 * dVADIBL_dVg) / VADIBL; dIdsa_dVd = T0 * dIdl_dVd + Idl * (1.0 - dVdseff_dVd - T9 * dVADIBL_dVd) / VADIBL; dIdsa_dVb = T0 * dIdl_dVb - Idl * (dVdseff_dVb + T9 * dVADIBL_dVb) / VADIBL; /* Add DITS to Ids */ T9 = diffVds / VADITS; T0 = 1.0 + T9; dIdsa_dVg = T0 * dIdsa_dVg - Idsa * (dVdseff_dVg + T9 * dVADITS_dVg) / VADITS; dIdsa_dVd = T0 * dIdsa_dVd + Idsa * (1.0 - dVdseff_dVd - T9 * dVADITS_dVd) / VADITS; dIdsa_dVb = T0 * dIdsa_dVb - Idsa * dVdseff_dVb / VADITS; Idsa *= T0; /* Add CLM to Ids */ T0 = log(Va / Vasat); dT0_dVg = dVa_dVg / Va - dVasat_dVg / Vasat; dT0_dVb = dVa_dVb / Va - dVasat_dVb / Vasat; dT0_dVd = dVa_dVd / Va - dVasat_dVd / Vasat; T1 = T0 / Cclm; T9 = 1.0 + T1; dT9_dVg = (dT0_dVg - T1 * dCclm_dVg) / Cclm; dT9_dVb = (dT0_dVb - T1 * dCclm_dVb) / Cclm; dT9_dVd = (dT0_dVd - T1 * dCclm_dVd) / Cclm; dIdsa_dVg = dIdsa_dVg * T9 + Idsa * dT9_dVg; dIdsa_dVb = dIdsa_dVb * T9 + Idsa * dT9_dVb; dIdsa_dVd = dIdsa_dVd * T9 + Idsa * dT9_dVd; Idsa *= T9; /* Substrate current begins */ tmp = pParam->BSIM4alpha0 + pParam->BSIM4alpha1 * Leff; if ((tmp <= 0.0) || (pParam->BSIM4beta0 <= 0.0)) { Isub = Gbd = Gbb = Gbg = 0.0; } else { T2 = tmp / Leff; if (diffVds > pParam->BSIM4beta0 / EXP_THRESHOLD) { T0 = -pParam->BSIM4beta0 / diffVds; T1 = T2 * diffVds * exp(T0); T3 = T1 / diffVds * (T0 - 
1.0); dT1_dVg = T3 * dVdseff_dVg; dT1_dVd = T3 * (dVdseff_dVd - 1.0); dT1_dVb = T3 * dVdseff_dVb; } else { T3 = T2 * MIN_EXP; T1 = T3 * diffVds; dT1_dVg = -T3 * dVdseff_dVg; dT1_dVd = T3 * (1.0 - dVdseff_dVd); dT1_dVb = -T3 * dVdseff_dVb; } T4 = Idsa * Vdseff; Isub = T1 * T4; Gbg = T1 * (dIdsa_dVg * Vdseff + Idsa * dVdseff_dVg) + T4 * dT1_dVg; Gbd = T1 * (dIdsa_dVd * Vdseff + Idsa * dVdseff_dVd) + T4 * dT1_dVd; Gbb = T1 * (dIdsa_dVb * Vdseff + Idsa * dVdseff_dVb) + T4 * dT1_dVb; Gbd += Gbg * dVgsteff_dVd; Gbb += Gbg * dVgsteff_dVb; Gbg *= dVgsteff_dVg; Gbb *= dVbseff_dVb; } here->BSIM4csub = Isub; here->BSIM4gbbs = Gbb; here->BSIM4gbgs = Gbg; here->BSIM4gbds = Gbd; /* Add SCBE to Ids */ T9 = diffVds / VASCBE; T0 = 1.0 + T9; Ids = Idsa * T0; Gm = T0 * dIdsa_dVg - Idsa * (dVdseff_dVg + T9 * dVASCBE_dVg) / VASCBE; Gds = T0 * dIdsa_dVd + Idsa * (1.0 - dVdseff_dVd - T9 * dVASCBE_dVd) / VASCBE; Gmb = T0 * dIdsa_dVb - Idsa * (dVdseff_dVb + T9 * dVASCBE_dVb) / VASCBE; tmp1 = Gds + Gm * dVgsteff_dVd; tmp2 = Gmb + Gm * dVgsteff_dVb; tmp3 = Gm; Gm = (Ids * dVdseff_dVg + Vdseff * tmp3) * dVgsteff_dVg; Gds = Ids * (dVdseff_dVd + dVdseff_dVg * dVgsteff_dVd) + Vdseff * tmp1; Gmb = (Ids * (dVdseff_dVb + dVdseff_dVg * dVgsteff_dVb) + Vdseff * tmp2) * dVbseff_dVb; cdrain = Ids * Vdseff; /* Source End Velocity Limit */ if((model->BSIM4vtlGiven) && (model->BSIM4vtl > 0.0) ) { T12 = 1.0 / Leff / CoxeffWovL; T11 = T12 / Vgsteff; T10 = -T11 / Vgsteff; vs = cdrain * T11; /* vs */ dvs_dVg = Gm * T11 + cdrain * T10 * dVgsteff_dVg; dvs_dVd = Gds * T11 + cdrain * T10 * dVgsteff_dVd; dvs_dVb = Gmb * T11 + cdrain * T10 * dVgsteff_dVb; T0 = 2 * MM; T1 = vs / (pParam->BSIM4vtl * pParam->BSIM4tfactor); if(T1 > 0.0) { T2 = 1.0 + exp(T0 * log(T1)); T3 = (T2 - 1.0) * T0 / vs; Fsevl = 1.0 / exp(log(T2)/ T0); dT2_dVg = T3 * dvs_dVg; dT2_dVd = T3 * dvs_dVd; dT2_dVb = T3 * dvs_dVb; T4 = -1.0 / T0 * Fsevl / T2; dFsevl_dVg = T4 * dT2_dVg; dFsevl_dVd = T4 * dT2_dVd; dFsevl_dVb = T4 * dT2_dVb; } else { Fsevl 
= 1.0; dFsevl_dVg = 0.0; dFsevl_dVd = 0.0; dFsevl_dVb = 0.0; } Gm *=Fsevl; Gm += cdrain * dFsevl_dVg; Gmb *=Fsevl; Gmb += cdrain * dFsevl_dVb; Gds *=Fsevl; Gds += cdrain * dFsevl_dVd; cdrain *= Fsevl; } here->BSIM4gds = Gds; here->BSIM4gm = Gm; here->BSIM4gmbs = Gmb; here->BSIM4IdovVds = Ids; if( here->BSIM4IdovVds <= 1.0e-9) here->BSIM4IdovVds = 1.0e-9; /* Calculate Rg */ if ((here->BSIM4rgateMod > 1) || (here->BSIM4trnqsMod != 0) || (here->BSIM4acnqsMod != 0)) { T9 = pParam->BSIM4xrcrg2 * model->BSIM4vtm; T0 = T9 * beta; dT0_dVd = (dbeta_dVd + dbeta_dVg * dVgsteff_dVd) * T9; dT0_dVb = (dbeta_dVb + dbeta_dVg * dVgsteff_dVb) * T9; dT0_dVg = dbeta_dVg * T9; here->BSIM4gcrg = pParam->BSIM4xrcrg1 * ( T0 + Ids); here->BSIM4gcrgd = pParam->BSIM4xrcrg1 * (dT0_dVd + tmp1); here->BSIM4gcrgb = pParam->BSIM4xrcrg1 * (dT0_dVb + tmp2) * dVbseff_dVb; here->BSIM4gcrgg = pParam->BSIM4xrcrg1 * (dT0_dVg + tmp3) * dVgsteff_dVg; if (here->BSIM4nf != 1.0) { here->BSIM4gcrg *= here->BSIM4nf; here->BSIM4gcrgg *= here->BSIM4nf; here->BSIM4gcrgd *= here->BSIM4nf; here->BSIM4gcrgb *= here->BSIM4nf; } if (here->BSIM4rgateMod == 2) { T10 = here->BSIM4grgeltd * here->BSIM4grgeltd; T11 = here->BSIM4grgeltd + here->BSIM4gcrg; here->BSIM4gcrg = here->BSIM4grgeltd * here->BSIM4gcrg / T11; T12 = T10 / T11 / T11; here->BSIM4gcrgg *= T12; here->BSIM4gcrgd *= T12; here->BSIM4gcrgb *= T12; } here->BSIM4gcrgs = -(here->BSIM4gcrgg + here->BSIM4gcrgd + here->BSIM4gcrgb); } /* Calculate bias-dependent external S/D resistance */ if (model->BSIM4rdsMod) { /* Rs(V) */ T0 = vgs - pParam->BSIM4vfbsd; T1 = sqrt(T0 * T0 + 1.0e-4); vgs_eff = 0.5 * (T0 + T1); dvgs_eff_dvg = vgs_eff / T1; T0 = 1.0 + pParam->BSIM4prwg * vgs_eff; dT0_dvg = -pParam->BSIM4prwg / T0 / T0 * dvgs_eff_dvg; T1 = -pParam->BSIM4prwb * vbs; dT1_dvb = -pParam->BSIM4prwb; T2 = 1.0 / T0 + T1; T3 = T2 + sqrt(T2 * T2 + 0.01); dT3_dvg = T3 / (T3 - T2); dT3_dvb = dT3_dvg * dT1_dvb; dT3_dvg *= dT0_dvg; T4 = pParam->BSIM4rs0 * 0.5; Rs = 
pParam->BSIM4rswmin + T3 * T4; dRs_dvg = T4 * dT3_dvg; dRs_dvb = T4 * dT3_dvb; T0 = 1.0 + here->BSIM4sourceConductance * Rs; here->BSIM4gstot = here->BSIM4sourceConductance / T0; T0 = -here->BSIM4gstot * here->BSIM4gstot; dgstot_dvd = 0.0; /* place holder */ dgstot_dvg = T0 * dRs_dvg; dgstot_dvb = T0 * dRs_dvb; dgstot_dvs = -(dgstot_dvg + dgstot_dvb + dgstot_dvd); /* Rd(V) */ T0 = vgd - pParam->BSIM4vfbsd; T1 = sqrt(T0 * T0 + 1.0e-4); vgd_eff = 0.5 * (T0 + T1); dvgd_eff_dvg = vgd_eff / T1; T0 = 1.0 + pParam->BSIM4prwg * vgd_eff; dT0_dvg = -pParam->BSIM4prwg / T0 / T0 * dvgd_eff_dvg; T1 = -pParam->BSIM4prwb * vbd; dT1_dvb = -pParam->BSIM4prwb; T2 = 1.0 / T0 + T1; T3 = T2 + sqrt(T2 * T2 + 0.01); dT3_dvg = T3 / (T3 - T2); dT3_dvb = dT3_dvg * dT1_dvb; dT3_dvg *= dT0_dvg; T4 = pParam->BSIM4rd0 * 0.5; Rd = pParam->BSIM4rdwmin + T3 * T4; dRd_dvg = T4 * dT3_dvg; dRd_dvb = T4 * dT3_dvb; T0 = 1.0 + here->BSIM4drainConductance * Rd; here->BSIM4gdtot = here->BSIM4drainConductance / T0; T0 = -here->BSIM4gdtot * here->BSIM4gdtot; dgdtot_dvs = 0.0; dgdtot_dvg = T0 * dRd_dvg; dgdtot_dvb = T0 * dRd_dvb; dgdtot_dvd = -(dgdtot_dvg + dgdtot_dvb + dgdtot_dvs); here->BSIM4gstotd = vses * dgstot_dvd; here->BSIM4gstotg = vses * dgstot_dvg; here->BSIM4gstots = vses * dgstot_dvs; here->BSIM4gstotb = vses * dgstot_dvb; T2 = vdes - vds; here->BSIM4gdtotd = T2 * dgdtot_dvd; here->BSIM4gdtotg = T2 * dgdtot_dvg; here->BSIM4gdtots = T2 * dgdtot_dvs; here->BSIM4gdtotb = T2 * dgdtot_dvb; } else /* WDLiu: for bypass */ { here->BSIM4gstot = here->BSIM4gstotd = here->BSIM4gstotg = 0.0; here->BSIM4gstots = here->BSIM4gstotb = 0.0; here->BSIM4gdtot = here->BSIM4gdtotd = here->BSIM4gdtotg = 0.0; here->BSIM4gdtots = here->BSIM4gdtotb = 0.0; } /* GIDL/GISL Models */ if(model->BSIM4mtrlMod == 0) T0 = 3.0 * toxe; else T0 = model->BSIM4epsrsub * toxe / epsrox; /* Calculate GIDL current */ vgs_eff = here->BSIM4vgs_eff; dvgs_eff_dvg = here->BSIM4dvgs_eff_dvg; vgd_eff = here->BSIM4vgd_eff; dvgd_eff_dvg = 
here->BSIM4dvgd_eff_dvg; if (model->BSIM4gidlMod==0){ if(model->BSIM4mtrlMod ==0) T1 = (vds - vgs_eff - pParam->BSIM4egidl ) / T0; else T1 = (vds - vgs_eff - pParam->BSIM4egidl + pParam->BSIM4vfbsd) / T0; if ((pParam->BSIM4agidl <= 0.0) || (pParam->BSIM4bgidl <= 0.0) || (T1 <= 0.0) || (pParam->BSIM4cgidl <= 0.0) || (vbd > 0.0)) Igidl = Ggidld = Ggidlg = Ggidlb = 0.0; else { dT1_dVd = 1.0 / T0; dT1_dVg = -dvgs_eff_dvg * dT1_dVd; T2 = pParam->BSIM4bgidl / T1; if (T2 < 100.0) { Igidl = pParam->BSIM4agidl * pParam->BSIM4weffCJ * T1 * exp(-T2); T3 = Igidl * (1.0 + T2) / T1; Ggidld = T3 * dT1_dVd; Ggidlg = T3 * dT1_dVg; } else { Igidl = pParam->BSIM4agidl * pParam->BSIM4weffCJ * 3.720075976e-44; Ggidld = Igidl * dT1_dVd; Ggidlg = Igidl * dT1_dVg; Igidl *= T1; } T4 = vbd * vbd; T5 = -vbd * T4; T6 = pParam->BSIM4cgidl + T5; T7 = T5 / T6; T8 = 3.0 * pParam->BSIM4cgidl * T4 / T6 / T6; Ggidld = Ggidld * T7 + Igidl * T8; Ggidlg = Ggidlg * T7; Ggidlb = -Igidl * T8; Igidl *= T7; } here->BSIM4Igidl = Igidl; here->BSIM4ggidld = Ggidld; here->BSIM4ggidlg = Ggidlg; here->BSIM4ggidlb = Ggidlb; /* Calculate GISL current */ if(model->BSIM4mtrlMod ==0) T1 = (-vds - vgd_eff - pParam->BSIM4egisl ) / T0; else T1 = (-vds - vgd_eff - pParam->BSIM4egisl + pParam->BSIM4vfbsd ) / T0; if ((pParam->BSIM4agisl <= 0.0) || (pParam->BSIM4bgisl <= 0.0) || (T1 <= 0.0) || (pParam->BSIM4cgisl <= 0.0) || (vbs > 0.0)) Igisl = Ggisls = Ggislg = Ggislb = 0.0; else { dT1_dVd = 1.0 / T0; dT1_dVg = -dvgd_eff_dvg * dT1_dVd; T2 = pParam->BSIM4bgisl / T1; if (T2 < 100.0) { Igisl = pParam->BSIM4agisl * pParam->BSIM4weffCJ * T1 * exp(-T2); T3 = Igisl * (1.0 + T2) / T1; Ggisls = T3 * dT1_dVd; Ggislg = T3 * dT1_dVg; } else { Igisl = pParam->BSIM4agisl * pParam->BSIM4weffCJ * 3.720075976e-44; Ggisls = Igisl * dT1_dVd; Ggislg = Igisl * dT1_dVg; Igisl *= T1; } T4 = vbs * vbs; T5 = -vbs * T4; T6 = pParam->BSIM4cgisl + T5; T7 = T5 / T6; T8 = 3.0 * pParam->BSIM4cgisl * T4 / T6 / T6; Ggisls = Ggisls * T7 + Igisl * T8; Ggislg 
= Ggislg * T7; Ggislb = -Igisl * T8; Igisl *= T7; } here->BSIM4Igisl = Igisl; here->BSIM4ggisls = Ggisls; here->BSIM4ggislg = Ggislg; here->BSIM4ggislb = Ggislb; } else{ /* v4.7 New Gidl/GISL model */ /* GISL */ if (model->BSIM4mtrlMod==0) T1 = (-vds - pParam->BSIM4rgisl * vgd_eff - pParam->BSIM4egisl) / T0; else T1 = (-vds - pParam->BSIM4rgisl * vgd_eff - pParam->BSIM4egisl + pParam->BSIM4vfbsd) / T0; if ((pParam->BSIM4agisl <= 0.0) || (pParam->BSIM4bgisl <= 0.0) || (T1 <= 0.0) || (pParam->BSIM4cgisl < 0.0) ) Igisl = Ggisls = Ggislg = Ggislb = 0.0; else { dT1_dVd = 1 / T0; dT1_dVg = - pParam->BSIM4rgisl * dT1_dVd * dvgd_eff_dvg; T2 = pParam->BSIM4bgisl / T1; if (T2 < EXPL_THRESHOLD) { Igisl = pParam->BSIM4weffCJ * pParam->BSIM4agisl * T1 * exp(-T2); T3 = Igisl / T1 * (T2 + 1); Ggisls = T3 * dT1_dVd; Ggislg = T3 * dT1_dVg; } else { T3 = pParam->BSIM4weffCJ * pParam->BSIM4agisl * MIN_EXPL; Igisl = T3 * T1 ; Ggisls = T3 * dT1_dVd; Ggislg = T3 * dT1_dVg; } T4 = vbs - pParam->BSIM4fgisl; if (T4==0) T5 = EXPL_THRESHOLD; else T5 = pParam->BSIM4kgisl / T4; if (T5<EXPL_THRESHOLD) {T6 = exp(T5); Ggislb = -Igisl * T6 * T5 / T4; } else {T6 = MAX_EXPL; Ggislb=0.0; } Ggisls*=T6; Ggislg*=T6; Igisl*=T6; } here->BSIM4Igisl = Igisl; here->BSIM4ggisls = Ggisls; here->BSIM4ggislg = Ggislg; here->BSIM4ggislb = Ggislb; /* End of GISL */ /* GIDL */ if (model->BSIM4mtrlMod==0) T1 = (vds - pParam->BSIM4rgidl * vgs_eff - pParam->BSIM4egidl) / T0; else T1 = (vds - pParam->BSIM4rgidl * vgs_eff - pParam->BSIM4egidl + pParam->BSIM4vfbsd) / T0; if ((pParam->BSIM4agidl <= 0.0) || (pParam->BSIM4bgidl <= 0.0) || (T1 <= 0.0) || (pParam->BSIM4cgidl < 0.0) ) Igidl = Ggidld = Ggidlg = Ggidlb = 0.0; else { dT1_dVd = 1 / T0; dT1_dVg = - pParam->BSIM4rgidl * dT1_dVd * dvgs_eff_dvg; T2 = pParam->BSIM4bgidl / T1; if (T2 < EXPL_THRESHOLD) { Igidl = pParam->BSIM4weffCJ * pParam->BSIM4agidl * T1 * exp(-T2); T3 = Igidl / T1 * (T2 + 1); Ggidld = T3 * dT1_dVd; Ggidlg = T3 * dT1_dVg; } else { T3 = 
pParam->BSIM4weffCJ * pParam->BSIM4agidl * MIN_EXPL; Igidl = T3 * T1 ; Ggidld = T3 * dT1_dVd; Ggidlg = T3 * dT1_dVg; } T4 = vbd - pParam->BSIM4fgidl; if (T4==0) T5 = EXPL_THRESHOLD; else T5 = pParam->BSIM4kgidl / T4; if (T5<EXPL_THRESHOLD) {T6 = exp(T5); Ggidlb = -Igidl * T6 * T5 / T4; } else {T6 = MAX_EXPL; Ggidlb=0.0; } Ggidld *= T6; Ggidlg *= T6; Igidl *= T6; } here->BSIM4Igidl = Igidl; here->BSIM4ggidld = Ggidld; here->BSIM4ggidlg = Ggidlg; here->BSIM4ggidlb = Ggidlb; /* End of New GIDL */ } /*End of Gidl*/ /* Calculate gate tunneling current */ if ((model->BSIM4igcMod != 0) || (model->BSIM4igbMod != 0)) { Vfb = here->BSIM4vfbzb; V3 = Vfb - Vgs_eff + Vbseff - DELTA_3; if (Vfb <= 0.0) T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * Vfb); else T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfb); T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = Vfb - 0.5 * (V3 + T0); dVfbeff_dVg = T1 * dVgs_eff_dVg; dVfbeff_dVb = -T1; /* WDLiu: -No surprise? No. -Good! */ Voxacc = Vfb - Vfbeff; dVoxacc_dVg = -dVfbeff_dVg; dVoxacc_dVb = -dVfbeff_dVb; if (Voxacc < 0.0) /* WDLiu: Avoiding numerical instability. 
*/ Voxacc = dVoxacc_dVg = dVoxacc_dVb = 0.0; T0 = 0.5 * pParam->BSIM4k1ox; T3 = Vgs_eff - Vfbeff - Vbseff - Vgsteff; if (pParam->BSIM4k1ox == 0.0) Voxdepinv = dVoxdepinv_dVg = dVoxdepinv_dVd = dVoxdepinv_dVb = 0.0; else if (T3 < 0.0) { Voxdepinv = -T3; dVoxdepinv_dVg = -dVgs_eff_dVg + dVfbeff_dVg + dVgsteff_dVg; dVoxdepinv_dVd = dVgsteff_dVd; dVoxdepinv_dVb = dVfbeff_dVb + 1.0 + dVgsteff_dVb; } else { T1 = sqrt(T0 * T0 + T3); T2 = T0 / T1; Voxdepinv = pParam->BSIM4k1ox * (T1 - T0); dVoxdepinv_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg); dVoxdepinv_dVd = -T2 * dVgsteff_dVd; dVoxdepinv_dVb = -T2 * (dVfbeff_dVb + 1.0 + dVgsteff_dVb); } Voxdepinv += Vgsteff; dVoxdepinv_dVg += dVgsteff_dVg; dVoxdepinv_dVd += dVgsteff_dVd; dVoxdepinv_dVb += dVgsteff_dVb; } if(model->BSIM4tempMod < 2) tmp = Vtm; else /* model->BSIM4tempMod = 2 , 3*/ tmp = Vtm0; if (model->BSIM4igcMod) { T0 = tmp * pParam->BSIM4nigc; if(model->BSIM4igcMod == 1) { VxNVt = (Vgs_eff - model->BSIM4type * here->BSIM4vth0) / T0; if (VxNVt > EXP_THRESHOLD) { Vaux = Vgs_eff - model->BSIM4type * here->BSIM4vth0; dVaux_dVg = dVgs_eff_dVg; dVaux_dVd = 0.0; dVaux_dVb = 0.0; } } else if (model->BSIM4igcMod == 2) { VxNVt = (Vgs_eff - here->BSIM4von) / T0; if (VxNVt > EXP_THRESHOLD) { Vaux = Vgs_eff - here->BSIM4von; dVaux_dVg = dVgs_eff_dVg; dVaux_dVd = -dVth_dVd; dVaux_dVb = -dVth_dVb; } } if (VxNVt < -EXP_THRESHOLD) { Vaux = T0 * log(1.0 + MIN_EXP); dVaux_dVg = dVaux_dVd = dVaux_dVb = 0.0; } else if ((VxNVt >= -EXP_THRESHOLD) && (VxNVt <= EXP_THRESHOLD)) { ExpVxNVt = exp(VxNVt); Vaux = T0 * log(1.0 + ExpVxNVt); dVaux_dVg = ExpVxNVt / (1.0 + ExpVxNVt); if(model->BSIM4igcMod == 1) { dVaux_dVd = 0.0; dVaux_dVb = 0.0; } else if (model->BSIM4igcMod == 2) { dVaux_dVd = -dVaux_dVg* dVth_dVd; /* Synopsys 08/30/2013 modify */ dVaux_dVb = -dVaux_dVg* dVth_dVb; /* Synopsys 08/30/2013 modify */ } dVaux_dVg *= dVgs_eff_dVg; } T2 = Vgs_eff * Vaux; dT2_dVg = dVgs_eff_dVg * Vaux + Vgs_eff * dVaux_dVg; dT2_dVd = Vgs_eff * 
dVaux_dVd; dT2_dVb = Vgs_eff * dVaux_dVb; T11 = pParam->BSIM4Aechvb; T12 = pParam->BSIM4Bechvb; T3 = pParam->BSIM4aigc * pParam->BSIM4cigc - pParam->BSIM4bigc; T4 = pParam->BSIM4bigc * pParam->BSIM4cigc; T5 = T12 * (pParam->BSIM4aigc + T3 * Voxdepinv - T4 * Voxdepinv * Voxdepinv); if (T5 > EXP_THRESHOLD) { T6 = MAX_EXP; dT6_dVg = dT6_dVd = dT6_dVb = 0.0; } else if (T5 < -EXP_THRESHOLD) { T6 = MIN_EXP; dT6_dVg = dT6_dVd = dT6_dVb = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxdepinv); dT6_dVd = dT6_dVg * dVoxdepinv_dVd; dT6_dVb = dT6_dVg * dVoxdepinv_dVb; dT6_dVg *= dVoxdepinv_dVg; } Igc = T11 * T2 * T6; dIgc_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgc_dVd = T11 * (T2 * dT6_dVd + T6 * dT2_dVd); dIgc_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb); if (model->BSIM4pigcdGiven) { Pigcd = pParam->BSIM4pigcd; dPigcd_dVg = dPigcd_dVd = dPigcd_dVb = 0.0; } else { /* T11 = pParam->BSIM4Bechvb * toxe; v4.7 */ T11 = -pParam->BSIM4Bechvb; T12 = Vgsteff + 1.0e-20; T13 = T11 / T12 / T12; T14 = -T13 / T12; Pigcd = T13 * (1.0 - 0.5 * Vdseff / T12); dPigcd_dVg = T14 * (2.0 + 0.5 * (dVdseff_dVg - 3.0 * Vdseff / T12)); dPigcd_dVd = 0.5 * T14 * dVdseff_dVd; dPigcd_dVb = 0.5 * T14 * dVdseff_dVb; } T7 = -Pigcd * Vdseff; /* bugfix */ dT7_dVg = -Vdseff * dPigcd_dVg - Pigcd * dVdseff_dVg; dT7_dVd = -Vdseff * dPigcd_dVd - Pigcd * dVdseff_dVd + dT7_dVg * dVgsteff_dVd; dT7_dVb = -Vdseff * dPigcd_dVb - Pigcd * dVdseff_dVb + dT7_dVg * dVgsteff_dVb; dT7_dVg *= dVgsteff_dVg; /*dT7_dVb *= dVbseff_dVb;*/ /* Synopsys, 2013/08/30 */ T8 = T7 * T7 + 2.0e-4; dT8_dVg = 2.0 * T7; dT8_dVd = dT8_dVg * dT7_dVd; dT8_dVb = dT8_dVg * dT7_dVb; dT8_dVg *= dT7_dVg; if (T7 > EXP_THRESHOLD) { T9 = MAX_EXP; dT9_dVg = dT9_dVd = dT9_dVb = 0.0; } else if (T7 < -EXP_THRESHOLD) { T9 = MIN_EXP; dT9_dVg = dT9_dVd = dT9_dVb = 0.0; } else { T9 = exp(T7); dT9_dVg = T9 * dT7_dVg; dT9_dVd = T9 * dT7_dVd; dT9_dVb = T9 * dT7_dVb; } T0 = T8 * T8; T1 = T9 - 1.0 + 1.0e-4; T10 = (T1 - T7) / T8; dT10_dVg = 
(dT9_dVg - dT7_dVg - T10 * dT8_dVg) / T8; dT10_dVd = (dT9_dVd - dT7_dVd - T10 * dT8_dVd) / T8; dT10_dVb = (dT9_dVb - dT7_dVb - T10 * dT8_dVb) / T8; Igcs = Igc * T10; dIgcs_dVg = dIgc_dVg * T10 + Igc * dT10_dVg; dIgcs_dVd = dIgc_dVd * T10 + Igc * dT10_dVd; dIgcs_dVb = dIgc_dVb * T10 + Igc * dT10_dVb; T1 = T9 - 1.0 - 1.0e-4; T10 = (T7 * T9 - T1) / T8; dT10_dVg = (dT7_dVg * T9 + (T7 - 1.0) * dT9_dVg - T10 * dT8_dVg) / T8; dT10_dVd = (dT7_dVd * T9 + (T7 - 1.0) * dT9_dVd - T10 * dT8_dVd) / T8; dT10_dVb = (dT7_dVb * T9 + (T7 - 1.0) * dT9_dVb - T10 * dT8_dVb) / T8; Igcd = Igc * T10; dIgcd_dVg = dIgc_dVg * T10 + Igc * dT10_dVg; dIgcd_dVd = dIgc_dVd * T10 + Igc * dT10_dVd; dIgcd_dVb = dIgc_dVb * T10 + Igc * dT10_dVb; here->BSIM4Igcs = Igcs; here->BSIM4gIgcsg = dIgcs_dVg; here->BSIM4gIgcsd = dIgcs_dVd; here->BSIM4gIgcsb = dIgcs_dVb * dVbseff_dVb; here->BSIM4Igcd = Igcd; here->BSIM4gIgcdg = dIgcd_dVg; here->BSIM4gIgcdd = dIgcd_dVd; here->BSIM4gIgcdb = dIgcd_dVb * dVbseff_dVb; T0 = vgs - (pParam->BSIM4vfbsd + pParam->BSIM4vfbsdoff); vgs_eff = sqrt(T0 * T0 + 1.0e-4); dvgs_eff_dvg = T0 / vgs_eff; T2 = vgs * vgs_eff; dT2_dVg = vgs * dvgs_eff_dvg + vgs_eff; T11 = pParam->BSIM4AechvbEdgeS; T12 = pParam->BSIM4BechvbEdge; T3 = pParam->BSIM4aigs * pParam->BSIM4cigs - pParam->BSIM4bigs; T4 = pParam->BSIM4bigs * pParam->BSIM4cigs; T5 = T12 * (pParam->BSIM4aigs + T3 * vgs_eff - T4 * vgs_eff * vgs_eff); if (T5 > EXP_THRESHOLD) { T6 = MAX_EXP; dT6_dVg = 0.0; } else if (T5 < -EXP_THRESHOLD) { T6 = MIN_EXP; dT6_dVg = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * vgs_eff) * dvgs_eff_dvg; } Igs = T11 * T2 * T6; dIgs_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgs_dVs = -dIgs_dVg; T0 = vgd - (pParam->BSIM4vfbsd + pParam->BSIM4vfbsdoff); vgd_eff = sqrt(T0 * T0 + 1.0e-4); dvgd_eff_dvg = T0 / vgd_eff; T2 = vgd * vgd_eff; dT2_dVg = vgd * dvgd_eff_dvg + vgd_eff; T11 = pParam->BSIM4AechvbEdgeD; T3 = pParam->BSIM4aigd * pParam->BSIM4cigd - pParam->BSIM4bigd; T4 = pParam->BSIM4bigd 
* pParam->BSIM4cigd; T5 = T12 * (pParam->BSIM4aigd + T3 * vgd_eff - T4 * vgd_eff * vgd_eff); if (T5 > EXP_THRESHOLD) { T6 = MAX_EXP; dT6_dVg = 0.0; } else if (T5 < -EXP_THRESHOLD) { T6 = MIN_EXP; dT6_dVg = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * vgd_eff) * dvgd_eff_dvg; } Igd = T11 * T2 * T6; dIgd_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgd_dVd = -dIgd_dVg; here->BSIM4Igs = Igs; here->BSIM4gIgsg = dIgs_dVg; here->BSIM4gIgss = dIgs_dVs; here->BSIM4Igd = Igd; here->BSIM4gIgdg = dIgd_dVg; here->BSIM4gIgdd = dIgd_dVd; } else { here->BSIM4Igcs = here->BSIM4gIgcsg = here->BSIM4gIgcsd = here->BSIM4gIgcsb = 0.0; here->BSIM4Igcd = here->BSIM4gIgcdg = here->BSIM4gIgcdd = here->BSIM4gIgcdb = 0.0; here->BSIM4Igs = here->BSIM4gIgsg = here->BSIM4gIgss = 0.0; here->BSIM4Igd = here->BSIM4gIgdg = here->BSIM4gIgdd = 0.0; } if (model->BSIM4igbMod) { T0 = tmp * pParam->BSIM4nigbacc; T1 = -Vgs_eff + Vbseff + Vfb; VxNVt = T1 / T0; if (VxNVt > EXP_THRESHOLD) { Vaux = T1; dVaux_dVg = -dVgs_eff_dVg; dVaux_dVb = 1.0; } else if (VxNVt < -EXP_THRESHOLD) { Vaux = T0 * log(1.0 + MIN_EXP); dVaux_dVg = dVaux_dVb = 0.0; } else { ExpVxNVt = exp(VxNVt); Vaux = T0 * log(1.0 + ExpVxNVt); dVaux_dVb = ExpVxNVt / (1.0 + ExpVxNVt); dVaux_dVg = -dVaux_dVb * dVgs_eff_dVg; } T2 = (Vgs_eff - Vbseff) * Vaux; dT2_dVg = dVgs_eff_dVg * Vaux + (Vgs_eff - Vbseff) * dVaux_dVg; dT2_dVb = -Vaux + (Vgs_eff - Vbseff) * dVaux_dVb; T11 = 4.97232e-7 * pParam->BSIM4weff * pParam->BSIM4leff * pParam->BSIM4ToxRatio; T12 = -7.45669e11 * toxe; T3 = pParam->BSIM4aigbacc * pParam->BSIM4cigbacc - pParam->BSIM4bigbacc; T4 = pParam->BSIM4bigbacc * pParam->BSIM4cigbacc; T5 = T12 * (pParam->BSIM4aigbacc + T3 * Voxacc - T4 * Voxacc * Voxacc); if (T5 > EXP_THRESHOLD) { T6 = MAX_EXP; dT6_dVg = dT6_dVb = 0.0; } else if (T5 < -EXP_THRESHOLD) { T6 = MIN_EXP; dT6_dVg = dT6_dVb = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxacc); dT6_dVb = dT6_dVg * dVoxacc_dVb; dT6_dVg *= dVoxacc_dVg; } 
Igbacc = T11 * T2 * T6; dIgbacc_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgbacc_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb); T0 = tmp * pParam->BSIM4nigbinv; T1 = Voxdepinv - pParam->BSIM4eigbinv; VxNVt = T1 / T0; if (VxNVt > EXP_THRESHOLD) { Vaux = T1; dVaux_dVg = dVoxdepinv_dVg; dVaux_dVd = dVoxdepinv_dVd; dVaux_dVb = dVoxdepinv_dVb; } else if (VxNVt < -EXP_THRESHOLD) { Vaux = T0 * log(1.0 + MIN_EXP); dVaux_dVg = dVaux_dVd = dVaux_dVb = 0.0; } else { ExpVxNVt = exp(VxNVt); Vaux = T0 * log(1.0 + ExpVxNVt); dVaux_dVg = ExpVxNVt / (1.0 + ExpVxNVt); dVaux_dVd = dVaux_dVg * dVoxdepinv_dVd; dVaux_dVb = dVaux_dVg * dVoxdepinv_dVb; dVaux_dVg *= dVoxdepinv_dVg; } T2 = (Vgs_eff - Vbseff) * Vaux; dT2_dVg = dVgs_eff_dVg * Vaux + (Vgs_eff - Vbseff) * dVaux_dVg; dT2_dVd = (Vgs_eff - Vbseff) * dVaux_dVd; dT2_dVb = -Vaux + (Vgs_eff - Vbseff) * dVaux_dVb; T11 *= 0.75610; T12 *= 1.31724; T3 = pParam->BSIM4aigbinv * pParam->BSIM4cigbinv - pParam->BSIM4bigbinv; T4 = pParam->BSIM4bigbinv * pParam->BSIM4cigbinv; T5 = T12 * (pParam->BSIM4aigbinv + T3 * Voxdepinv - T4 * Voxdepinv * Voxdepinv); if (T5 > EXP_THRESHOLD) { T6 = MAX_EXP; dT6_dVg = dT6_dVd = dT6_dVb = 0.0; } else if (T5 < -EXP_THRESHOLD) { T6 = MIN_EXP; dT6_dVg = dT6_dVd = dT6_dVb = 0.0; } else { T6 = exp(T5); dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxdepinv); dT6_dVd = dT6_dVg * dVoxdepinv_dVd; dT6_dVb = dT6_dVg * dVoxdepinv_dVb; dT6_dVg *= dVoxdepinv_dVg; } Igbinv = T11 * T2 * T6; dIgbinv_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg); dIgbinv_dVd = T11 * (T2 * dT6_dVd + T6 * dT2_dVd); dIgbinv_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb); here->BSIM4Igb = Igbinv + Igbacc; here->BSIM4gIgbg = dIgbinv_dVg + dIgbacc_dVg; here->BSIM4gIgbd = dIgbinv_dVd; here->BSIM4gIgbb = (dIgbinv_dVb + dIgbacc_dVb) * dVbseff_dVb; } else { here->BSIM4Igb = here->BSIM4gIgbg = here->BSIM4gIgbd = here->BSIM4gIgbs = here->BSIM4gIgbb = 0.0; } /* End of Gate current */ if (here->BSIM4nf != 1.0) { cdrain *= here->BSIM4nf; here->BSIM4gds *= here->BSIM4nf; 
here->BSIM4gm *= here->BSIM4nf; here->BSIM4gmbs *= here->BSIM4nf; here->BSIM4IdovVds *= here->BSIM4nf; here->BSIM4gbbs *= here->BSIM4nf; here->BSIM4gbgs *= here->BSIM4nf; here->BSIM4gbds *= here->BSIM4nf; here->BSIM4csub *= here->BSIM4nf; here->BSIM4Igidl *= here->BSIM4nf; here->BSIM4ggidld *= here->BSIM4nf; here->BSIM4ggidlg *= here->BSIM4nf; here->BSIM4ggidlb *= here->BSIM4nf; here->BSIM4Igisl *= here->BSIM4nf; here->BSIM4ggisls *= here->BSIM4nf; here->BSIM4ggislg *= here->BSIM4nf; here->BSIM4ggislb *= here->BSIM4nf; here->BSIM4Igcs *= here->BSIM4nf; here->BSIM4gIgcsg *= here->BSIM4nf; here->BSIM4gIgcsd *= here->BSIM4nf; here->BSIM4gIgcsb *= here->BSIM4nf; here->BSIM4Igcd *= here->BSIM4nf; here->BSIM4gIgcdg *= here->BSIM4nf; here->BSIM4gIgcdd *= here->BSIM4nf; here->BSIM4gIgcdb *= here->BSIM4nf; here->BSIM4Igs *= here->BSIM4nf; here->BSIM4gIgsg *= here->BSIM4nf; here->BSIM4gIgss *= here->BSIM4nf; here->BSIM4Igd *= here->BSIM4nf; here->BSIM4gIgdg *= here->BSIM4nf; here->BSIM4gIgdd *= here->BSIM4nf; here->BSIM4Igb *= here->BSIM4nf; here->BSIM4gIgbg *= here->BSIM4nf; here->BSIM4gIgbd *= here->BSIM4nf; here->BSIM4gIgbb *= here->BSIM4nf; } here->BSIM4ggidls = -(here->BSIM4ggidld + here->BSIM4ggidlg + here->BSIM4ggidlb); here->BSIM4ggisld = -(here->BSIM4ggisls + here->BSIM4ggislg + here->BSIM4ggislb); here->BSIM4gIgbs = -(here->BSIM4gIgbg + here->BSIM4gIgbd + here->BSIM4gIgbb); here->BSIM4gIgcss = -(here->BSIM4gIgcsg + here->BSIM4gIgcsd + here->BSIM4gIgcsb); here->BSIM4gIgcds = -(here->BSIM4gIgcdg + here->BSIM4gIgcdd + here->BSIM4gIgcdb); here->BSIM4cd = cdrain; /* Calculations for noise analysis */ if (model->BSIM4tnoiMod == 0) { Abulk = Abulk0 * pParam->BSIM4abulkCVfactor; Vdsat = Vgsteff / Abulk; T0 = Vdsat - Vds - DELTA_4; T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * Vdsat); if (T0 >= 0.0) Vdseff = Vdsat - 0.5 * (T0 + T1); else { T3 = (DELTA_4 + DELTA_4) / (T1 - T0); T4 = 1.0 - T3; T5 = Vdsat * T3 / (T1 - T0); Vdseff = Vdsat * T4; } if (Vds == 0.0) Vdseff = 0.0; T0 = Abulk 
* Vdseff; T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1.0e-20); T2 = Vdseff / T1; T3 = T0 * T2; here->BSIM4qinv = Coxeff * pParam->BSIM4weffCV * here->BSIM4nf * pParam->BSIM4leffCV * (Vgsteff - 0.5 * T0 + Abulk * T3); } else if(model->BSIM4tnoiMod == 2) { here->BSIM4noiGd0 = here->BSIM4nf * beta * Vgsteff / (1.0 + gche * Rds); } /* * BSIM4 C-V begins */ if ((model->BSIM4xpart < 0) || (!ChargeComputationNeeded)) { qgate = qdrn = qsrc = qbulk = 0.0; here->BSIM4cggb = here->BSIM4cgsb = here->BSIM4cgdb = 0.0; here->BSIM4cdgb = here->BSIM4cdsb = here->BSIM4cddb = 0.0; here->BSIM4cbgb = here->BSIM4cbsb = here->BSIM4cbdb = 0.0; here->BSIM4csgb = here->BSIM4cssb = here->BSIM4csdb = 0.0; here->BSIM4cgbb = here->BSIM4csbb = here->BSIM4cdbb = here->BSIM4cbbb = 0.0; here->BSIM4cqdb = here->BSIM4cqsb = here->BSIM4cqgb = here->BSIM4cqbb = 0.0; here->BSIM4gtau = 0.0; goto finished; } else if (model->BSIM4capMod == 0) { if (Vbseff < 0.0) { VbseffCV = Vbs; /*4.6.2*/ dVbseffCV_dVb = 1.0; } else { VbseffCV = pParam->BSIM4phi - Phis; dVbseffCV_dVb = -dPhis_dVb * dVbseff_dVb; /*4.6.2*/ } Vfb = pParam->BSIM4vfbcv; Vth = Vfb + pParam->BSIM4phi + pParam->BSIM4k1ox * sqrtPhis; Vgst = Vgs_eff - Vth; dVth_dVb = pParam->BSIM4k1ox * dsqrtPhis_dVb *dVbseff_dVb; /*4.6.2*/ dVgst_dVb = -dVth_dVb; dVgst_dVg = dVgs_eff_dVg; CoxWL = model->BSIM4coxe * pParam->BSIM4weffCV * pParam->BSIM4leffCV * here->BSIM4nf; Arg1 = Vgs_eff - VbseffCV - Vfb; if (Arg1 <= 0.0) { qgate = CoxWL * Arg1; qbulk = -qgate; qdrn = 0.0; here->BSIM4cggb = CoxWL * dVgs_eff_dVg; here->BSIM4cgdb = 0.0; here->BSIM4cgsb = CoxWL * (dVbseffCV_dVb - dVgs_eff_dVg); here->BSIM4cdgb = 0.0; here->BSIM4cddb = 0.0; here->BSIM4cdsb = 0.0; here->BSIM4cbgb = -CoxWL * dVgs_eff_dVg; here->BSIM4cbdb = 0.0; here->BSIM4cbsb = -here->BSIM4cgsb; } /* Arg1 <= 0.0, end of accumulation */ else if (Vgst <= 0.0) { T1 = 0.5 * pParam->BSIM4k1ox; T2 = sqrt(T1 * T1 + Arg1); qgate = CoxWL * pParam->BSIM4k1ox * (T2 - T1); qbulk = -qgate; qdrn = 0.0; T0 = CoxWL * T1 / T2; 
here->BSIM4cggb = T0 * dVgs_eff_dVg; here->BSIM4cgdb = 0.0; here->BSIM4cgsb = T0 * (dVbseffCV_dVb - dVgs_eff_dVg); here->BSIM4cdgb = 0.0; here->BSIM4cddb = 0.0; here->BSIM4cdsb = 0.0; here->BSIM4cbgb = -here->BSIM4cggb; here->BSIM4cbdb = 0.0; here->BSIM4cbsb = -here->BSIM4cgsb; } /* Vgst <= 0.0, end of depletion */ else { One_Third_CoxWL = CoxWL / 3.0; Two_Third_CoxWL = 2.0 * One_Third_CoxWL; AbulkCV = Abulk0 * pParam->BSIM4abulkCVfactor; dAbulkCV_dVb = pParam->BSIM4abulkCVfactor * dAbulk0_dVb*dVbseff_dVb; dVdsat_dVg = 1.0 / AbulkCV; /*4.6.2*/ Vdsat = Vgst * dVdsat_dVg; dVdsat_dVb = - (Vdsat * dAbulkCV_dVb + dVth_dVb)* dVdsat_dVg; if (model->BSIM4xpart > 0.5) { /* 0/100 Charge partition model */ if (Vdsat <= Vds) { /* saturation region */ T1 = Vdsat / 3.0; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4phi - T1); T2 = -Two_Third_CoxWL * Vgst; qbulk = -(qgate + T2); qdrn = 0.0; here->BSIM4cggb = One_Third_CoxWL * (3.0 - dVdsat_dVg) * dVgs_eff_dVg; T2 = -One_Third_CoxWL * dVdsat_dVb; here->BSIM4cgsb = -(here->BSIM4cggb + T2); here->BSIM4cgdb = 0.0; here->BSIM4cdgb = 0.0; here->BSIM4cddb = 0.0; here->BSIM4cdsb = 0.0; here->BSIM4cbgb = -(here->BSIM4cggb - Two_Third_CoxWL * dVgs_eff_dVg); T3 = -(T2 + Two_Third_CoxWL * dVth_dVb); here->BSIM4cbsb = -(here->BSIM4cbgb + T3); here->BSIM4cbdb = 0.0; } else { /* linear region */ Alphaz = Vgst / Vdsat; T1 = 2.0 * Vdsat - Vds; T2 = Vds / (3.0 * T1); T3 = T2 * Vds; T9 = 0.25 * CoxWL; T4 = T9 * Alphaz; T7 = 2.0 * Vds - T1 - 3.0 * T3; T8 = T3 - T1 - 2.0 * Vds; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4phi - 0.5 * (Vds - T3)); T10 = T4 * T8; qdrn = T4 * T7; qbulk = -(qgate + qdrn + T10); T5 = T3 / T1; here->BSIM4cggb = CoxWL * (1.0 - T5 * dVdsat_dVg) * dVgs_eff_dVg; T11 = -CoxWL * T5 * dVdsat_dVb; here->BSIM4cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5); here->BSIM4cgsb = -(here->BSIM4cggb + T11 + here->BSIM4cgdb); T6 = 1.0 / Vdsat; dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg); dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb); T7 = 
T9 * T7; T8 = T9 * T8; T9 = 2.0 * T4 * (1.0 - 3.0 * T5); here->BSIM4cdgb = (T7 * dAlphaz_dVg - T9 * dVdsat_dVg) * dVgs_eff_dVg; T12 = T7 * dAlphaz_dVb - T9 * dVdsat_dVb; here->BSIM4cddb = T4 * (3.0 - 6.0 * T2 - 3.0 * T5); here->BSIM4cdsb = -(here->BSIM4cdgb + T12 + here->BSIM4cddb); T9 = 2.0 * T4 * (1.0 + T5); T10 = (T8 * dAlphaz_dVg - T9 * dVdsat_dVg) * dVgs_eff_dVg; T11 = T8 * dAlphaz_dVb - T9 * dVdsat_dVb; T12 = T4 * (2.0 * T2 + T5 - 1.0); T0 = -(T10 + T11 + T12); here->BSIM4cbgb = -(here->BSIM4cggb + here->BSIM4cdgb + T10); here->BSIM4cbdb = -(here->BSIM4cgdb + here->BSIM4cddb + T12); here->BSIM4cbsb = -(here->BSIM4cgsb + here->BSIM4cdsb + T0); } } else if (model->BSIM4xpart < 0.5) { /* 40/60 Charge partition model */ if (Vds >= Vdsat) { /* saturation region */ T1 = Vdsat / 3.0; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4phi - T1); T2 = -Two_Third_CoxWL * Vgst; qbulk = -(qgate + T2); qdrn = 0.4 * T2; here->BSIM4cggb = One_Third_CoxWL * (3.0 - dVdsat_dVg) * dVgs_eff_dVg; T2 = -One_Third_CoxWL * dVdsat_dVb; here->BSIM4cgsb = -(here->BSIM4cggb + T2); here->BSIM4cgdb = 0.0; T3 = 0.4 * Two_Third_CoxWL; here->BSIM4cdgb = -T3 * dVgs_eff_dVg; here->BSIM4cddb = 0.0; T4 = T3 * dVth_dVb; here->BSIM4cdsb = -(T4 + here->BSIM4cdgb); here->BSIM4cbgb = -(here->BSIM4cggb - Two_Third_CoxWL * dVgs_eff_dVg); T3 = -(T2 + Two_Third_CoxWL * dVth_dVb); here->BSIM4cbsb = -(here->BSIM4cbgb + T3); here->BSIM4cbdb = 0.0; } else { /* linear region */ Alphaz = Vgst / Vdsat; T1 = 2.0 * Vdsat - Vds; T2 = Vds / (3.0 * T1); T3 = T2 * Vds; T9 = 0.25 * CoxWL; T4 = T9 * Alphaz; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4phi - 0.5 * (Vds - T3)); T5 = T3 / T1; here->BSIM4cggb = CoxWL * (1.0 - T5 * dVdsat_dVg) * dVgs_eff_dVg; tmp = -CoxWL * T5 * dVdsat_dVb; here->BSIM4cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5); here->BSIM4cgsb = -(here->BSIM4cggb + here->BSIM4cgdb + tmp); T6 = 1.0 / Vdsat; dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg); dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb); T6 = 8.0 * 
Vdsat * Vdsat - 6.0 * Vdsat * Vds + 1.2 * Vds * Vds; T8 = T2 / T1; T7 = Vds - T1 - T8 * T6; qdrn = T4 * T7; T7 *= T9; tmp = T8 / T1; tmp1 = T4 * (2.0 - 4.0 * tmp * T6 + T8 * (16.0 * Vdsat - 6.0 * Vds)); here->BSIM4cdgb = (T7 * dAlphaz_dVg - tmp1 * dVdsat_dVg) * dVgs_eff_dVg; T10 = T7 * dAlphaz_dVb - tmp1 * dVdsat_dVb; here->BSIM4cddb = T4 * (2.0 - (1.0 / (3.0 * T1 * T1) + 2.0 * tmp) * T6 + T8 * (6.0 * Vdsat - 2.4 * Vds)); here->BSIM4cdsb = -(here->BSIM4cdgb + T10 + here->BSIM4cddb); T7 = 2.0 * (T1 + T3); qbulk = -(qgate - T4 * T7); T7 *= T9; T0 = 4.0 * T4 * (1.0 - T5); T12 = (-T7 * dAlphaz_dVg - T0 * dVdsat_dVg) * dVgs_eff_dVg - here->BSIM4cdgb; /*4.6.2*/ T11 = -T7 * dAlphaz_dVb - T10 - T0 * dVdsat_dVb; T10 = -4.0 * T4 * (T2 - 0.5 + 0.5 * T5) - here->BSIM4cddb; tmp = -(T10 + T11 + T12); here->BSIM4cbgb = -(here->BSIM4cggb + here->BSIM4cdgb + T12); here->BSIM4cbdb = -(here->BSIM4cgdb + here->BSIM4cddb + T10); here->BSIM4cbsb = -(here->BSIM4cgsb + here->BSIM4cdsb + tmp); } } else { /* 50/50 partitioning */ if (Vds >= Vdsat) { /* saturation region */ T1 = Vdsat / 3.0; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4phi - T1); T2 = -Two_Third_CoxWL * Vgst; qbulk = -(qgate + T2); qdrn = 0.5 * T2; here->BSIM4cggb = One_Third_CoxWL * (3.0 - dVdsat_dVg) * dVgs_eff_dVg; T2 = -One_Third_CoxWL * dVdsat_dVb; here->BSIM4cgsb = -(here->BSIM4cggb + T2); here->BSIM4cgdb = 0.0; here->BSIM4cdgb = -One_Third_CoxWL * dVgs_eff_dVg; here->BSIM4cddb = 0.0; T4 = One_Third_CoxWL * dVth_dVb; here->BSIM4cdsb = -(T4 + here->BSIM4cdgb); here->BSIM4cbgb = -(here->BSIM4cggb - Two_Third_CoxWL * dVgs_eff_dVg); T3 = -(T2 + Two_Third_CoxWL * dVth_dVb); here->BSIM4cbsb = -(here->BSIM4cbgb + T3); here->BSIM4cbdb = 0.0; } else { /* linear region */ Alphaz = Vgst / Vdsat; T1 = 2.0 * Vdsat - Vds; T2 = Vds / (3.0 * T1); T3 = T2 * Vds; T9 = 0.25 * CoxWL; T4 = T9 * Alphaz; qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4phi - 0.5 * (Vds - T3)); T5 = T3 / T1; here->BSIM4cggb = CoxWL * (1.0 - T5 * dVdsat_dVg) * 
dVgs_eff_dVg; tmp = -CoxWL * T5 * dVdsat_dVb; here->BSIM4cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5); here->BSIM4cgsb = -(here->BSIM4cggb + here->BSIM4cgdb + tmp); T6 = 1.0 / Vdsat; dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg); dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb); T7 = T1 + T3; qdrn = -T4 * T7; qbulk = - (qgate + qdrn + qdrn); T7 *= T9; T0 = T4 * (2.0 * T5 - 2.0); here->BSIM4cdgb = (T0 * dVdsat_dVg - T7 * dAlphaz_dVg) * dVgs_eff_dVg; T12 = T0 * dVdsat_dVb - T7 * dAlphaz_dVb; here->BSIM4cddb = T4 * (1.0 - 2.0 * T2 - T5); here->BSIM4cdsb = -(here->BSIM4cdgb + T12 + here->BSIM4cddb); here->BSIM4cbgb = -(here->BSIM4cggb + 2.0 * here->BSIM4cdgb); here->BSIM4cbdb = -(here->BSIM4cgdb + 2.0 * here->BSIM4cddb); here->BSIM4cbsb = -(here->BSIM4cgsb + 2.0 * here->BSIM4cdsb); } /* end of linear region */ } /* end of 50/50 partition */ } /* end of inversion */ } /* end of capMod=0 */ else { if (Vbseff < 0.0) { VbseffCV = Vbseff; dVbseffCV_dVb = 1.0; } else { VbseffCV = pParam->BSIM4phi - Phis; dVbseffCV_dVb = -dPhis_dVb; } CoxWL = model->BSIM4coxe * pParam->BSIM4weffCV * pParam->BSIM4leffCV * here->BSIM4nf; if(model->BSIM4cvchargeMod == 0) { /* Seperate VgsteffCV with noff and voffcv */ noff = n * pParam->BSIM4noff; dnoff_dVd = pParam->BSIM4noff * dn_dVd; dnoff_dVb = pParam->BSIM4noff * dn_dVb; T0 = Vtm * noff; voffcv = pParam->BSIM4voffcv; VgstNVt = (Vgst - voffcv) / T0; if (VgstNVt > EXP_THRESHOLD) { Vgsteff = Vgst - voffcv; dVgsteff_dVg = dVgs_eff_dVg; dVgsteff_dVd = -dVth_dVd; dVgsteff_dVb = -dVth_dVb; } else if (VgstNVt < -EXP_THRESHOLD) { Vgsteff = T0 * log(1.0 + MIN_EXP); dVgsteff_dVg = 0.0; dVgsteff_dVd = Vgsteff / noff; dVgsteff_dVb = dVgsteff_dVd * dnoff_dVb; dVgsteff_dVd *= dnoff_dVd; } else { ExpVgst = exp(VgstNVt); Vgsteff = T0 * log(1.0 + ExpVgst); dVgsteff_dVg = ExpVgst / (1.0 + ExpVgst); dVgsteff_dVd = -dVgsteff_dVg * (dVth_dVd + (Vgst - voffcv) / noff * dnoff_dVd) + Vgsteff / noff * dnoff_dVd; dVgsteff_dVb = -dVgsteff_dVg * (dVth_dVb + (Vgst - 
voffcv) / noff * dnoff_dVb) + Vgsteff / noff * dnoff_dVb; dVgsteff_dVg *= dVgs_eff_dVg; } /* End of VgsteffCV for cvchargeMod = 0 */ } else { T0 = n * Vtm; T1 = pParam->BSIM4mstarcv * Vgst; T2 = T1 / T0; if (T2 > EXP_THRESHOLD) { T10 = T1; dT10_dVg = pParam->BSIM4mstarcv * dVgs_eff_dVg; dT10_dVd = -dVth_dVd * pParam->BSIM4mstarcv; dT10_dVb = -dVth_dVb * pParam->BSIM4mstarcv; } else if (T2 < -EXP_THRESHOLD) { T10 = Vtm * log(1.0 + MIN_EXP); dT10_dVg = 0.0; dT10_dVd = T10 * dn_dVd; dT10_dVb = T10 * dn_dVb; T10 *= n; } else { ExpVgst = exp(T2); T3 = Vtm * log(1.0 + ExpVgst); T10 = n * T3; dT10_dVg = pParam->BSIM4mstarcv * ExpVgst / (1.0 + ExpVgst); dT10_dVb = T3 * dn_dVb - dT10_dVg * (dVth_dVb + Vgst * dn_dVb / n); dT10_dVd = T3 * dn_dVd - dT10_dVg * (dVth_dVd + Vgst * dn_dVd / n); dT10_dVg *= dVgs_eff_dVg; } T1 = pParam->BSIM4voffcbncv - (1.0 - pParam->BSIM4mstarcv) * Vgst; T2 = T1 / T0; if (T2 < -EXP_THRESHOLD) { T3 = model->BSIM4coxe * MIN_EXP / pParam->BSIM4cdep0; T9 = pParam->BSIM4mstarcv + T3 * n; dT9_dVg = 0.0; dT9_dVd = dn_dVd * T3; dT9_dVb = dn_dVb * T3; } else if (T2 > EXP_THRESHOLD) { T3 = model->BSIM4coxe * MAX_EXP / pParam->BSIM4cdep0; T9 = pParam->BSIM4mstarcv + T3 * n; dT9_dVg = 0.0; dT9_dVd = dn_dVd * T3; dT9_dVb = dn_dVb * T3; } else { ExpVgst = exp(T2); T3 = model->BSIM4coxe / pParam->BSIM4cdep0; T4 = T3 * ExpVgst; T5 = T1 * T4 / T0; T9 = pParam->BSIM4mstarcv + n * T4; dT9_dVg = T3 * (pParam->BSIM4mstarcv - 1.0) * ExpVgst / Vtm; dT9_dVb = T4 * dn_dVb - dT9_dVg * dVth_dVb - T5 * dn_dVb; dT9_dVd = T4 * dn_dVd - dT9_dVg * dVth_dVd - T5 * dn_dVd; dT9_dVg *= dVgs_eff_dVg; } Vgsteff = T10 / T9; T11 = T9 * T9; dVgsteff_dVg = (T9 * dT10_dVg - T10 * dT9_dVg) / T11; dVgsteff_dVd = (T9 * dT10_dVd - T10 * dT9_dVd) / T11; dVgsteff_dVb = (T9 * dT10_dVb - T10 * dT9_dVb) / T11; /* End of VgsteffCV for cvchargeMod = 1 */ } if (model->BSIM4capMod == 1) { Vfb = here->BSIM4vfbzb; V3 = Vfb - Vgs_eff + VbseffCV - DELTA_3; if (Vfb <= 0.0) T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 
* Vfb); else T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfb); T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = Vfb - 0.5 * (V3 + T0); dVfbeff_dVg = T1 * dVgs_eff_dVg; dVfbeff_dVb = -T1 * dVbseffCV_dVb; Qac0 = CoxWL * (Vfbeff - Vfb); dQac0_dVg = CoxWL * dVfbeff_dVg; dQac0_dVb = CoxWL * dVfbeff_dVb; T0 = 0.5 * pParam->BSIM4k1ox; T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff; if (pParam->BSIM4k1ox == 0.0) { T1 = 0.0; T2 = 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->BSIM4k1ox; T2 = CoxWL; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWL * T0 / T1; } Qsub0 = CoxWL * pParam->BSIM4k1ox * (T1 - T0); dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg); dQsub0_dVd = -T2 * dVgsteff_dVd; dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb + dVgsteff_dVb); AbulkCV = Abulk0 * pParam->BSIM4abulkCVfactor; dAbulkCV_dVb = pParam->BSIM4abulkCVfactor * dAbulk0_dVb; VdsatCV = Vgsteff / AbulkCV; T0 = VdsatCV - Vds - DELTA_4; dT0_dVg = 1.0 / AbulkCV; dT0_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV; T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * VdsatCV); dT1_dVg = (T0 + DELTA_4 + DELTA_4) / T1; dT1_dVd = -T0 / T1; dT1_dVb = dT1_dVg * dT0_dVb; dT1_dVg *= dT0_dVg; if (T0 >= 0.0) { VdseffCV = VdsatCV - 0.5 * (T0 + T1); dVdseffCV_dVg = 0.5 * (dT0_dVg - dT1_dVg); dVdseffCV_dVd = 0.5 * (1.0 - dT1_dVd); dVdseffCV_dVb = 0.5 * (dT0_dVb - dT1_dVb); } else { T3 = (DELTA_4 + DELTA_4) / (T1 - T0); T4 = 1.0 - T3; T5 = VdsatCV * T3 / (T1 - T0); VdseffCV = VdsatCV * T4; dVdseffCV_dVg = dT0_dVg * T4 + T5 * (dT1_dVg - dT0_dVg); dVdseffCV_dVd = T5 * (dT1_dVd + 1.0); dVdseffCV_dVb = dT0_dVb * (T4 - T5) + T5 * dT1_dVb; } if (Vds == 0.0) { VdseffCV = 0.0; dVdseffCV_dVg = 0.0; dVdseffCV_dVb = 0.0; } T0 = AbulkCV * VdseffCV; T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1.0e-20); T2 = VdseffCV / T1; T3 = T0 * T2; T4 = (1.0 - 12.0 * T2 * T2 * AbulkCV); T5 = (6.0 * T0 * (4.0 * Vgsteff - T0) / (T1 * T1) - 0.5); T6 = 12.0 * T2 * T2 * Vgsteff; qgate = CoxWL * (Vgsteff - 0.5 * VdseffCV + T3); Cgg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg); Cgd1 = CoxWL * T5 * 
dVdseffCV_dVd + Cgg1 * dVgsteff_dVd; Cgb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Cgg1 * dVgsteff_dVb; Cgg1 *= dVgsteff_dVg; T7 = 1.0 - AbulkCV; qbulk = CoxWL * T7 * (0.5 * VdseffCV - T3); T4 = -T7 * (T4 - 1.0); T5 = -T7 * T5; T6 = -(T7 * T6 + (0.5 * VdseffCV - T3)); Cbg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg); Cbd1 = CoxWL * T5 * dVdseffCV_dVd + Cbg1 * dVgsteff_dVd; Cbb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Cbg1 * dVgsteff_dVb; Cbg1 *= dVgsteff_dVg; if (model->BSIM4xpart > 0.5) { /* 0/100 Charge petition model */ T1 = T1 + T1; qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0 - T0 * T0 / T1); T7 = (4.0 * Vgsteff - T0) / (T1 * T1); T4 = -(0.5 + 24.0 * T0 * T0 / (T1 * T1)); T5 = -(0.25 * AbulkCV - 12.0 * AbulkCV * T0 * T7); T6 = -(0.25 * VdseffCV - 12.0 * T0 * VdseffCV * T7); Csg = CoxWL * (T4 + T5 * dVdseffCV_dVg); Csd = CoxWL * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd; Csb = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Csg * dVgsteff_dVb; Csg *= dVgsteff_dVg; } else if (model->BSIM4xpart < 0.5) { /* 40/60 Charge petition model */ T1 = T1 / 12.0; T2 = 0.5 * CoxWL / (T1 * T1); T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff * (Vgsteff - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T2 * T3; T7 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0) + 0.4 * T0 * T0; T4 = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0 * Vgsteff - 8.0 * T0 / 3.0) + 2.0 * T0 * T0 / 3.0); T5 = (qsrc / T1 + T2 * T7) * AbulkCV; T6 = (qsrc / T1 * VdseffCV + T2 * T7 * VdseffCV); Csg = (T4 + T5 * dVdseffCV_dVg); Csd = T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd; Csb = (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Csg * dVgsteff_dVb; Csg *= dVgsteff_dVg; } else { /* 50/50 Charge petition model */ qsrc = -0.5 * (qgate + qbulk); Csg = -0.5 * (Cgg1 + Cbg1); Csb = -0.5 * (Cgb1 + Cbb1); Csd = -0.5 * (Cgd1 + Cbd1); } qgate += Qac0 + Qsub0; qbulk -= (Qac0 + Qsub0); qdrn = -(qgate + qbulk + qsrc); Cgg = dQac0_dVg + dQsub0_dVg + Cgg1; Cgd = dQsub0_dVd + Cgd1; Cgb = dQac0_dVb + dQsub0_dVb + Cgb1; Cbg = 
Cbg1 - dQac0_dVg - dQsub0_dVg; Cbd = Cbd1 - dQsub0_dVd; Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb; Cgb *= dVbseff_dVb; Cbb *= dVbseff_dVb; Csb *= dVbseff_dVb; here->BSIM4cggb = Cgg; here->BSIM4cgsb = -(Cgg + Cgd + Cgb); here->BSIM4cgdb = Cgd; here->BSIM4cdgb = -(Cgg + Cbg + Csg); here->BSIM4cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb + Csg + Csd + Csb); here->BSIM4cddb = -(Cgd + Cbd + Csd); here->BSIM4cbgb = Cbg; here->BSIM4cbsb = -(Cbg + Cbd + Cbb); here->BSIM4cbdb = Cbd; } /* Charge-Thickness capMod (CTM) begins */ else if (model->BSIM4capMod == 2) { V3 = here->BSIM4vfbzb - Vgs_eff + VbseffCV - DELTA_3; if (here->BSIM4vfbzb <= 0.0) T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * here->BSIM4vfbzb); else T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * here->BSIM4vfbzb); T1 = 0.5 * (1.0 + V3 / T0); Vfbeff = here->BSIM4vfbzb - 0.5 * (V3 + T0); dVfbeff_dVg = T1 * dVgs_eff_dVg; dVfbeff_dVb = -T1 * dVbseffCV_dVb; Cox = here->BSIM4coxp; Tox = 1.0e8 * here->BSIM4toxp; T0 = (Vgs_eff - VbseffCV - here->BSIM4vfbzb) / Tox; dT0_dVg = dVgs_eff_dVg / Tox; dT0_dVb = -dVbseffCV_dVb / Tox; tmp = T0 * pParam->BSIM4acde; if ((-EXP_THRESHOLD < tmp) && (tmp < EXP_THRESHOLD)) { Tcen = pParam->BSIM4ldeb * exp(tmp); dTcen_dVg = pParam->BSIM4acde * Tcen; dTcen_dVb = dTcen_dVg * dT0_dVb; dTcen_dVg *= dT0_dVg; } else if (tmp <= -EXP_THRESHOLD) { Tcen = pParam->BSIM4ldeb * MIN_EXP; dTcen_dVg = dTcen_dVb = 0.0; } else { Tcen = pParam->BSIM4ldeb * MAX_EXP; dTcen_dVg = dTcen_dVb = 0.0; } LINK = 1.0e-3 * here->BSIM4toxp; V3 = pParam->BSIM4ldeb - Tcen - LINK; V4 = sqrt(V3 * V3 + 4.0 * LINK * pParam->BSIM4ldeb); Tcen = pParam->BSIM4ldeb - 0.5 * (V3 + V4); T1 = 0.5 * (1.0 + V3 / V4); dTcen_dVg *= T1; dTcen_dVb *= T1; Ccen = epssub / Tcen; T2 = Cox / (Cox + Ccen); Coxeff = T2 * Ccen; T3 = -Ccen / Tcen; dCoxeff_dVg = T2 * T2 * T3; dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb; dCoxeff_dVg *= dTcen_dVg; CoxWLcen = CoxWL * Coxeff / model->BSIM4coxe; Qac0 = CoxWLcen * (Vfbeff - here->BSIM4vfbzb); QovCox = Qac0 / Coxeff; dQac0_dVg = CoxWLcen * 
dVfbeff_dVg + QovCox * dCoxeff_dVg; dQac0_dVb = CoxWLcen * dVfbeff_dVb + QovCox * dCoxeff_dVb; T0 = 0.5 * pParam->BSIM4k1ox; T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff; if (pParam->BSIM4k1ox == 0.0) { T1 = 0.0; T2 = 0.0; } else if (T3 < 0.0) { T1 = T0 + T3 / pParam->BSIM4k1ox; T2 = CoxWLcen; } else { T1 = sqrt(T0 * T0 + T3); T2 = CoxWLcen * T0 / T1; } Qsub0 = CoxWLcen * pParam->BSIM4k1ox * (T1 - T0); QovCox = Qsub0 / Coxeff; dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg) + QovCox * dCoxeff_dVg; dQsub0_dVd = -T2 * dVgsteff_dVd; dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb + dVgsteff_dVb) + QovCox * dCoxeff_dVb; /* Gate-bias dependent delta Phis begins */ if (pParam->BSIM4k1ox <= 0.0) { Denomi = 0.25 * pParam->BSIM4moin * Vtm; T0 = 0.5 * pParam->BSIM4sqrtPhi; } else { Denomi = pParam->BSIM4moin * Vtm * pParam->BSIM4k1ox * pParam->BSIM4k1ox; T0 = pParam->BSIM4k1ox * pParam->BSIM4sqrtPhi; } T1 = 2.0 * T0 + Vgsteff; DeltaPhi = Vtm * log(1.0 + T1 * Vgsteff / Denomi); dDeltaPhi_dVg = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff); /* End of delta Phis */ /* VgDP = Vgsteff - DeltaPhi */ T0 = Vgsteff - DeltaPhi - 0.001; dT0_dVg = 1.0 - dDeltaPhi_dVg; T1 = sqrt(T0 * T0 + Vgsteff * 0.004); VgDP = 0.5 * (T0 + T1); dVgDP_dVg = 0.5 * (dT0_dVg + (T0 * dT0_dVg + 0.002) / T1); Tox += Tox; /* WDLiu: Tcen reevaluated below due to different Vgsteff */ T0 = (Vgsteff + here->BSIM4vtfbphi2) / Tox; tmp = exp(model->BSIM4bdos * 0.7 * log(T0)); T1 = 1.0 + tmp; T2 = model->BSIM4bdos * 0.7 * tmp / (T0 * Tox); Tcen = model->BSIM4ados * 1.9e-9 / T1; dTcen_dVg = -Tcen * T2 / T1; dTcen_dVd = dTcen_dVg * dVgsteff_dVd; dTcen_dVb = dTcen_dVg * dVgsteff_dVb; dTcen_dVg *= dVgsteff_dVg; Ccen = epssub / Tcen; T0 = Cox / (Cox + Ccen); Coxeff = T0 * Ccen; T1 = -Ccen / Tcen; dCoxeff_dVg = T0 * T0 * T1; dCoxeff_dVd = dCoxeff_dVg * dTcen_dVd; dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb; dCoxeff_dVg *= dTcen_dVg; CoxWLcen = CoxWL * Coxeff / model->BSIM4coxe; AbulkCV = Abulk0 * 
pParam->BSIM4abulkCVfactor; dAbulkCV_dVb = pParam->BSIM4abulkCVfactor * dAbulk0_dVb; VdsatCV = VgDP / AbulkCV; T0 = VdsatCV - Vds - DELTA_4; dT0_dVg = dVgDP_dVg / AbulkCV; dT0_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV; T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * VdsatCV); dT1_dVg = (T0 + DELTA_4 + DELTA_4) / T1; dT1_dVd = -T0 / T1; dT1_dVb = dT1_dVg * dT0_dVb; dT1_dVg *= dT0_dVg; if (T0 >= 0.0) { VdseffCV = VdsatCV - 0.5 * (T0 + T1); dVdseffCV_dVg = 0.5 * (dT0_dVg - dT1_dVg); dVdseffCV_dVd = 0.5 * (1.0 - dT1_dVd); dVdseffCV_dVb = 0.5 * (dT0_dVb - dT1_dVb); } else { T3 = (DELTA_4 + DELTA_4) / (T1 - T0); T4 = 1.0 - T3; T5 = VdsatCV * T3 / (T1 - T0); VdseffCV = VdsatCV * T4; dVdseffCV_dVg = dT0_dVg * T4 + T5 * (dT1_dVg - dT0_dVg); dVdseffCV_dVd = T5 * (dT1_dVd + 1.0); dVdseffCV_dVb = dT0_dVb * (T4 - T5) + T5 * dT1_dVb; } if (Vds == 0.0) { VdseffCV = 0.0; dVdseffCV_dVg = 0.0; dVdseffCV_dVb = 0.0; } T0 = AbulkCV * VdseffCV; T1 = VgDP; T2 = 12.0 * (T1 - 0.5 * T0 + 1.0e-20); T3 = T0 / T2; T4 = 1.0 - 12.0 * T3 * T3; T5 = AbulkCV * (6.0 * T0 * (4.0 * T1 - T0) / (T2 * T2) - 0.5); T6 = T5 * VdseffCV / AbulkCV; qgate = CoxWLcen * (T1 - T0 * (0.5 - T3)); QovCox = qgate / Coxeff; Cgg1 = CoxWLcen * (T4 * dVgDP_dVg + T5 * dVdseffCV_dVg); Cgd1 = CoxWLcen * T5 * dVdseffCV_dVd + Cgg1 * dVgsteff_dVd + QovCox * dCoxeff_dVd; Cgb1 = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Cgg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb; Cgg1 = Cgg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg; T7 = 1.0 - AbulkCV; T8 = T2 * T2; T9 = 12.0 * T7 * T0 * T0 / (T8 * AbulkCV); T10 = T9 * dVgDP_dVg; T11 = -T7 * T5 / AbulkCV; T12 = -(T9 * T1 / AbulkCV + VdseffCV * (0.5 - T0 / T2)); qbulk = CoxWLcen * T7 * (0.5 * VdseffCV - T0 * VdseffCV / T2); QovCox = qbulk / Coxeff; Cbg1 = CoxWLcen * (T10 + T11 * dVdseffCV_dVg); Cbd1 = CoxWLcen * T11 * dVdseffCV_dVd + Cbg1 * dVgsteff_dVd + QovCox * dCoxeff_dVd; Cbb1 = CoxWLcen * (T11 * dVdseffCV_dVb + T12 * dAbulkCV_dVb) + Cbg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb; Cbg1 = Cbg1 * 
dVgsteff_dVg + QovCox * dCoxeff_dVg; if (model->BSIM4xpart > 0.5) { /* 0/100 partition */ qsrc = -CoxWLcen * (T1 / 2.0 + T0 / 4.0 - 0.5 * T0 * T0 / T2); QovCox = qsrc / Coxeff; T2 += T2; T3 = T2 * T2; T7 = -(0.25 - 12.0 * T0 * (4.0 * T1 - T0) / T3); T4 = -(0.5 + 24.0 * T0 * T0 / T3) * dVgDP_dVg; T5 = T7 * AbulkCV; T6 = T7 * VdseffCV; Csg = CoxWLcen * (T4 + T5 * dVdseffCV_dVg); Csd = CoxWLcen * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd + QovCox * dCoxeff_dVd; Csb = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb) + Csg * dVgsteff_dVb + QovCox * dCoxeff_dVb; Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg; } else if (model->BSIM4xpart < 0.5) { /* 40/60 partition */ T2 = T2 / 12.0; T3 = 0.5 * CoxWLcen / (T2 * T2); T4 = T1 * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0 * T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0; qsrc = -T3 * T4; QovCox = qsrc / Coxeff; T8 = 4.0 / 3.0 * T1 * (T1 - T0) + 0.4 * T0 * T0; T5 = -2.0 * qsrc / T2 - T3 * (T1 * (3.0 * T1 - 8.0 * T0 / 3.0) + 2.0 * T0 * T0 / 3.0); T6 = AbulkCV * (qsrc / T2 + T3 * T8); T7 = T6 * VdseffCV / AbulkCV; Csg = T5 * dVgDP_dVg + T6 * dVdseffCV_dVg; Csd = Csg * dVgsteff_dVd + T6 * dVdseffCV_dVd + QovCox * dCoxeff_dVd; Csb = Csg * dVgsteff_dVb + T6 * dVdseffCV_dVb + T7 * dAbulkCV_dVb + QovCox * dCoxeff_dVb; Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg; } else { /* 50/50 partition */ qsrc = -0.5 * qgate; Csg = -0.5 * Cgg1; Csd = -0.5 * Cgd1; Csb = -0.5 * Cgb1; } qgate += Qac0 + Qsub0 - qbulk; qbulk -= (Qac0 + Qsub0); qdrn = -(qgate + qbulk + qsrc); Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg; Cbd = Cbd1 - dQsub0_dVd; Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb; Cgg = Cgg1 - Cbg; Cgd = Cgd1 - Cbd; Cgb = Cgb1 - Cbb; Cgb *= dVbseff_dVb; Cbb *= dVbseff_dVb; Csb *= dVbseff_dVb; here->BSIM4cggb = Cgg; here->BSIM4cgsb = -(Cgg + Cgd + Cgb); here->BSIM4cgdb = Cgd; here->BSIM4cdgb = -(Cgg + Cbg + Csg); here->BSIM4cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb + Csg + Csd + Csb); here->BSIM4cddb = -(Cgd + Cbd + Csd); here->BSIM4cbgb = Cbg; here->BSIM4cbsb = 
-(Cbg + Cbd + Cbb); here->BSIM4cbdb = Cbd; } /* End of CTM */ } here->BSIM4csgb = - here->BSIM4cggb - here->BSIM4cdgb - here->BSIM4cbgb; here->BSIM4csdb = - here->BSIM4cgdb - here->BSIM4cddb - here->BSIM4cbdb; here->BSIM4cssb = - here->BSIM4cgsb - here->BSIM4cdsb - here->BSIM4cbsb; here->BSIM4cgbb = - here->BSIM4cgdb - here->BSIM4cggb - here->BSIM4cgsb; here->BSIM4cdbb = - here->BSIM4cddb - here->BSIM4cdgb - here->BSIM4cdsb; here->BSIM4cbbb = - here->BSIM4cbgb - here->BSIM4cbdb - here->BSIM4cbsb; here->BSIM4csbb = - here->BSIM4cgbb - here->BSIM4cdbb - here->BSIM4cbbb; here->BSIM4qgate = qgate; here->BSIM4qbulk = qbulk; here->BSIM4qdrn = qdrn; here->BSIM4qsrc = -(qgate + qbulk + qdrn); /* NQS begins */ if ((here->BSIM4trnqsMod) || (here->BSIM4acnqsMod)) { here->BSIM4qchqs = qcheq = -(qbulk + qgate); here->BSIM4cqgb = -(here->BSIM4cggb + here->BSIM4cbgb); here->BSIM4cqdb = -(here->BSIM4cgdb + here->BSIM4cbdb); here->BSIM4cqsb = -(here->BSIM4cgsb + here->BSIM4cbsb); here->BSIM4cqbb = -(here->BSIM4cqgb + here->BSIM4cqdb + here->BSIM4cqsb); CoxWL = model->BSIM4coxe * pParam->BSIM4weffCV * here->BSIM4nf * pParam->BSIM4leffCV; T1 = here->BSIM4gcrg / CoxWL; /* 1 / tau */ here->BSIM4gtau = T1 * ScalingFactor; if (here->BSIM4acnqsMod) here->BSIM4taunet = 1.0 / T1; *(ckt->CKTstate0 + here->BSIM4qcheq) = qcheq; if (ckt->CKTmode & MODEINITTRAN) *(ckt->CKTstate1 + here->BSIM4qcheq) = *(ckt->CKTstate0 + here->BSIM4qcheq); if (here->BSIM4trnqsMod) { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qcheq); if (error) return(error); } } finished: /* Calculate junction C-V */ if (ChargeComputationNeeded) { czbd = model->BSIM4DunitAreaTempJctCap * here->BSIM4Adeff; /* bug fix */ czbs = model->BSIM4SunitAreaTempJctCap * here->BSIM4Aseff; czbdsw = model->BSIM4DunitLengthSidewallTempJctCap * here->BSIM4Pdeff; czbdswg = model->BSIM4DunitLengthGateSidewallTempJctCap * pParam->BSIM4weffCJ * here->BSIM4nf; czbssw = model->BSIM4SunitLengthSidewallTempJctCap * here->BSIM4Pseff; czbsswg = 
model->BSIM4SunitLengthGateSidewallTempJctCap * pParam->BSIM4weffCJ * here->BSIM4nf; MJS = model->BSIM4SbulkJctBotGradingCoeff; MJSWS = model->BSIM4SbulkJctSideGradingCoeff; MJSWGS = model->BSIM4SbulkJctGateSideGradingCoeff; MJD = model->BSIM4DbulkJctBotGradingCoeff; MJSWD = model->BSIM4DbulkJctSideGradingCoeff; MJSWGD = model->BSIM4DbulkJctGateSideGradingCoeff; /* Source Bulk Junction */ if (vbs_jct == 0.0) { *(ckt->CKTstate0 + here->BSIM4qbs) = 0.0; here->BSIM4capbs = czbs + czbssw + czbsswg; } else if (vbs_jct < 0.0) { if (czbs > 0.0) { arg = 1.0 - vbs_jct / model->BSIM4PhiBS; if (MJS == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJS * log(arg)); *(ckt->CKTstate0 + here->BSIM4qbs) = model->BSIM4PhiBS * czbs * (1.0 - arg * sarg) / (1.0 - MJS); here->BSIM4capbs = czbs * sarg; } else { *(ckt->CKTstate0 + here->BSIM4qbs) = 0.0; here->BSIM4capbs = 0.0; } if (czbssw > 0.0) { arg = 1.0 - vbs_jct / model->BSIM4PhiBSWS; if (MJSWS == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSWS * log(arg)); *(ckt->CKTstate0 + here->BSIM4qbs) += model->BSIM4PhiBSWS * czbssw * (1.0 - arg * sarg) / (1.0 - MJSWS); here->BSIM4capbs += czbssw * sarg; } if (czbsswg > 0.0) { arg = 1.0 - vbs_jct / model->BSIM4PhiBSWGS; if (MJSWGS == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSWGS * log(arg)); *(ckt->CKTstate0 + here->BSIM4qbs) += model->BSIM4PhiBSWGS * czbsswg * (1.0 - arg * sarg) / (1.0 - MJSWGS); here->BSIM4capbs += czbsswg * sarg; } } else { T0 = czbs + czbssw + czbsswg; T1 = vbs_jct * (czbs * MJS / model->BSIM4PhiBS + czbssw * MJSWS / model->BSIM4PhiBSWS + czbsswg * MJSWGS / model->BSIM4PhiBSWGS); *(ckt->CKTstate0 + here->BSIM4qbs) = vbs_jct * (T0 + 0.5 * T1); here->BSIM4capbs = T0 + T1; } /* Drain Bulk Junction */ if (vbd_jct == 0.0) { *(ckt->CKTstate0 + here->BSIM4qbd) = 0.0; here->BSIM4capbd = czbd + czbdsw + czbdswg; } else if (vbd_jct < 0.0) { if (czbd > 0.0) { arg = 1.0 - vbd_jct / model->BSIM4PhiBD; if (MJD == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJD * log(arg)); 
*(ckt->CKTstate0 + here->BSIM4qbd) = model->BSIM4PhiBD* czbd * (1.0 - arg * sarg) / (1.0 - MJD); here->BSIM4capbd = czbd * sarg; } else { *(ckt->CKTstate0 + here->BSIM4qbd) = 0.0; here->BSIM4capbd = 0.0; } if (czbdsw > 0.0) { arg = 1.0 - vbd_jct / model->BSIM4PhiBSWD; if (MJSWD == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSWD * log(arg)); *(ckt->CKTstate0 + here->BSIM4qbd) += model->BSIM4PhiBSWD * czbdsw * (1.0 - arg * sarg) / (1.0 - MJSWD); here->BSIM4capbd += czbdsw * sarg; } if (czbdswg > 0.0) { arg = 1.0 - vbd_jct / model->BSIM4PhiBSWGD; if (MJSWGD == 0.5) sarg = 1.0 / sqrt(arg); else sarg = exp(-MJSWGD * log(arg)); *(ckt->CKTstate0 + here->BSIM4qbd) += model->BSIM4PhiBSWGD * czbdswg * (1.0 - arg * sarg) / (1.0 - MJSWGD); here->BSIM4capbd += czbdswg * sarg; } } else { T0 = czbd + czbdsw + czbdswg; T1 = vbd_jct * (czbd * MJD / model->BSIM4PhiBD + czbdsw * MJSWD / model->BSIM4PhiBSWD + czbdswg * MJSWGD / model->BSIM4PhiBSWGD); *(ckt->CKTstate0 + here->BSIM4qbd) = vbd_jct * (T0 + 0.5 * T1); here->BSIM4capbd = T0 + T1; } } /* * check convergence */ if ((here->BSIM4off == 0) || (!(ckt->CKTmode & MODEINITFIX))) { if (Check == 1) { ckt->CKTnoncon++; #ifndef NEWCONV } else { if (here->BSIM4mode >= 0) { Idtot = here->BSIM4cd + here->BSIM4csub + here->BSIM4Igidl - here->BSIM4cbd; } else { Idtot = here->BSIM4cd + here->BSIM4cbd - here->BSIM4Igidl; /* bugfix */ } tol0 = ckt->CKTreltol * MAX(fabs(cdhat), fabs(Idtot)) + ckt->CKTabstol; tol1 = ckt->CKTreltol * MAX(fabs(cseshat), fabs(Isestot)) + ckt->CKTabstol; tol2 = ckt->CKTreltol * MAX(fabs(cdedhat), fabs(Idedtot)) + ckt->CKTabstol; tol3 = ckt->CKTreltol * MAX(fabs(cgshat), fabs(Igstot)) + ckt->CKTabstol; tol4 = ckt->CKTreltol * MAX(fabs(cgdhat), fabs(Igdtot)) + ckt->CKTabstol; tol5 = ckt->CKTreltol * MAX(fabs(cgbhat), fabs(Igbtot)) + ckt->CKTabstol; if ((fabs(cdhat - Idtot) >= tol0) || (fabs(cseshat - Isestot) >= tol1) || (fabs(cdedhat - Idedtot) >= tol2)) { ckt->CKTnoncon++; } else if ((fabs(cgshat - Igstot) >= 
tol3) || (fabs(cgdhat - Igdtot) >= tol4) || (fabs(cgbhat - Igbtot) >= tol5)) { ckt->CKTnoncon++; } else { Ibtot = here->BSIM4cbs + here->BSIM4cbd - here->BSIM4Igidl - here->BSIM4Igisl - here->BSIM4csub; tol6 = ckt->CKTreltol * MAX(fabs(cbhat), fabs(Ibtot)) + ckt->CKTabstol; if (fabs(cbhat - Ibtot) > tol6) { ckt->CKTnoncon++; } } #endif /* NEWCONV */ } } *(ckt->CKTstate0 + here->BSIM4vds) = vds; *(ckt->CKTstate0 + here->BSIM4vgs) = vgs; *(ckt->CKTstate0 + here->BSIM4vbs) = vbs; *(ckt->CKTstate0 + here->BSIM4vbd) = vbd; *(ckt->CKTstate0 + here->BSIM4vges) = vges; *(ckt->CKTstate0 + here->BSIM4vgms) = vgms; *(ckt->CKTstate0 + here->BSIM4vdbs) = vdbs; *(ckt->CKTstate0 + here->BSIM4vdbd) = vdbd; *(ckt->CKTstate0 + here->BSIM4vsbs) = vsbs; *(ckt->CKTstate0 + here->BSIM4vses) = vses; *(ckt->CKTstate0 + here->BSIM4vdes) = vdes; *(ckt->CKTstate0 + here->BSIM4qdef) = qdef; if (!ChargeComputationNeeded) goto line850; if (here->BSIM4rgateMod == 3) { vgdx = vgmd; vgsx = vgms; } else /* For rgateMod == 0, 1 and 2 */ { vgdx = vgd; vgsx = vgs; } if (model->BSIM4capMod == 0) { cgdo = pParam->BSIM4cgdo; qgdo = pParam->BSIM4cgdo * vgdx; cgso = pParam->BSIM4cgso; qgso = pParam->BSIM4cgso * vgsx; } else /* For both capMod == 1 and 2 */ { T0 = vgdx + DELTA_1; T1 = sqrt(T0 * T0 + 4.0 * DELTA_1); T2 = 0.5 * (T0 - T1); T3 = pParam->BSIM4weffCV * pParam->BSIM4cgdl; T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM4ckappad); cgdo = pParam->BSIM4cgdo + T3 - T3 * (1.0 - 1.0 / T4) * (0.5 - 0.5 * T0 / T1); qgdo = (pParam->BSIM4cgdo + T3) * vgdx - T3 * (T2 + 0.5 * pParam->BSIM4ckappad * (T4 - 1.0)); T0 = vgsx + DELTA_1; T1 = sqrt(T0 * T0 + 4.0 * DELTA_1); T2 = 0.5 * (T0 - T1); T3 = pParam->BSIM4weffCV * pParam->BSIM4cgsl; T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM4ckappas); cgso = pParam->BSIM4cgso + T3 - T3 * (1.0 - 1.0 / T4) * (0.5 - 0.5 * T0 / T1); qgso = (pParam->BSIM4cgso + T3) * vgsx - T3 * (T2 + 0.5 * pParam->BSIM4ckappas * (T4 - 1.0)); } if (here->BSIM4nf != 1.0) { cgdo *= here->BSIM4nf; cgso *= 
here->BSIM4nf; qgdo *= here->BSIM4nf; qgso *= here->BSIM4nf; } here->BSIM4cgdo = cgdo; here->BSIM4qgdo = qgdo; here->BSIM4cgso = cgso; here->BSIM4qgso = qgso; #ifndef NOBYPASS line755: #endif ag0 = ckt->CKTag[0]; if (here->BSIM4mode > 0) { if (here->BSIM4trnqsMod == 0) { qdrn -= qgdo; if (here->BSIM4rgateMod == 3) { gcgmgmb = (cgdo + cgso + pParam->BSIM4cgbo) * ag0; gcgmdb = -cgdo * ag0; gcgmsb = -cgso * ag0; gcgmbb = -pParam->BSIM4cgbo * ag0; gcdgmb = gcgmdb; gcsgmb = gcgmsb; gcbgmb = gcgmbb; gcggb = here->BSIM4cggb * ag0; gcgdb = here->BSIM4cgdb * ag0; gcgsb = here->BSIM4cgsb * ag0; gcgbb = -(gcggb + gcgdb + gcgsb); gcdgb = here->BSIM4cdgb * ag0; gcsgb = -(here->BSIM4cggb + here->BSIM4cbgb + here->BSIM4cdgb) * ag0; gcbgb = here->BSIM4cbgb * ag0; qgmb = pParam->BSIM4cgbo * vgmb; qgmid = qgdo + qgso + qgmb; qbulk -= qgmb; qsrc = -(qgate + qgmid + qbulk + qdrn); } else { gcggb = (here->BSIM4cggb + cgdo + cgso + pParam->BSIM4cgbo ) * ag0; gcgdb = (here->BSIM4cgdb - cgdo) * ag0; gcgsb = (here->BSIM4cgsb - cgso) * ag0; gcgbb = -(gcggb + gcgdb + gcgsb); gcdgb = (here->BSIM4cdgb - cgdo) * ag0; gcsgb = -(here->BSIM4cggb + here->BSIM4cbgb + here->BSIM4cdgb + cgso) * ag0; gcbgb = (here->BSIM4cbgb - pParam->BSIM4cgbo) * ag0; gcdgmb = gcsgmb = gcbgmb = 0.0; qgb = pParam->BSIM4cgbo * vgb; qgate += qgdo + qgso + qgb; qbulk -= qgb; qsrc = -(qgate + qbulk + qdrn); } gcddb = (here->BSIM4cddb + here->BSIM4capbd + cgdo) * ag0; gcdsb = here->BSIM4cdsb * ag0; gcsdb = -(here->BSIM4cgdb + here->BSIM4cbdb + here->BSIM4cddb) * ag0; gcssb = (here->BSIM4capbs + cgso - (here->BSIM4cgsb + here->BSIM4cbsb + here->BSIM4cdsb)) * ag0; if (!here->BSIM4rbodyMod) { gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb); gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb); gcbdb = (here->BSIM4cbdb - here->BSIM4capbd) * ag0; gcbsb = (here->BSIM4cbsb - here->BSIM4capbs) * ag0; gcdbdb = 0.0; gcsbsb = 0.0; } else { gcdbb = -(here->BSIM4cddb + here->BSIM4cdgb + here->BSIM4cdsb) * ag0; gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb) + 
here->BSIM4capbs * ag0; gcbdb = here->BSIM4cbdb * ag0; gcbsb = here->BSIM4cbsb * ag0; gcdbdb = -here->BSIM4capbd * ag0; gcsbsb = -here->BSIM4capbs * ag0; } gcbbb = -(gcbdb + gcbgb + gcbsb + gcbgmb); ggtg = ggtd = ggtb = ggts = 0.0; sxpart = 0.6; dxpart = 0.4; ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; } else { qcheq = here->BSIM4qchqs; CoxWL = model->BSIM4coxe * pParam->BSIM4weffCV * here->BSIM4nf * pParam->BSIM4leffCV; T0 = qdef * ScalingFactor / CoxWL; ggtg = here->BSIM4gtg = T0 * here->BSIM4gcrgg; ggtd = here->BSIM4gtd = T0 * here->BSIM4gcrgd; ggts = here->BSIM4gts = T0 * here->BSIM4gcrgs; ggtb = here->BSIM4gtb = T0 * here->BSIM4gcrgb; gqdef = ScalingFactor * ag0; gcqgb = here->BSIM4cqgb * ag0; gcqdb = here->BSIM4cqdb * ag0; gcqsb = here->BSIM4cqsb * ag0; gcqbb = here->BSIM4cqbb * ag0; if (fabs(qcheq) <= 1.0e-5 * CoxWL) { if (model->BSIM4xpart < 0.5) { dxpart = 0.4; } else if (model->BSIM4xpart > 0.5) { dxpart = 0.0; } else { dxpart = 0.5; } ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; } else { dxpart = qdrn / qcheq; Cdd = here->BSIM4cddb; Csd = -(here->BSIM4cgdb + here->BSIM4cddb + here->BSIM4cbdb); ddxpart_dVd = (Cdd - dxpart * (Cdd + Csd)) / qcheq; Cdg = here->BSIM4cdgb; Csg = -(here->BSIM4cggb + here->BSIM4cdgb + here->BSIM4cbgb); ddxpart_dVg = (Cdg - dxpart * (Cdg + Csg)) / qcheq; Cds = here->BSIM4cdsb; Css = -(here->BSIM4cgsb + here->BSIM4cdsb + here->BSIM4cbsb); ddxpart_dVs = (Cds - dxpart * (Cds + Css)) / qcheq; ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs); } sxpart = 1.0 - dxpart; dsxpart_dVd = -ddxpart_dVd; dsxpart_dVg = -ddxpart_dVg; dsxpart_dVs = -ddxpart_dVs; dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs); if (here->BSIM4rgateMod == 3) { gcgmgmb = (cgdo + cgso + pParam->BSIM4cgbo) * ag0; gcgmdb = -cgdo * ag0; gcgmsb = -cgso * ag0; gcgmbb = -pParam->BSIM4cgbo * ag0; gcdgmb = gcgmdb; gcsgmb = gcgmsb; gcbgmb = gcgmbb; gcdgb = gcsgb = 
gcbgb = 0.0; gcggb = gcgdb = gcgsb = gcgbb = 0.0; qgmb = pParam->BSIM4cgbo * vgmb; qgmid = qgdo + qgso + qgmb; qgate = 0.0; qbulk = -qgmb; qdrn = -qgdo; qsrc = -(qgmid + qbulk + qdrn); } else { gcggb = (cgdo + cgso + pParam->BSIM4cgbo ) * ag0; gcgdb = -cgdo * ag0; gcgsb = -cgso * ag0; gcgbb = -pParam->BSIM4cgbo * ag0; gcdgb = gcgdb; gcsgb = gcgsb; gcbgb = gcgbb; gcdgmb = gcsgmb = gcbgmb = 0.0; qgb = pParam->BSIM4cgbo * vgb; qgate = qgdo + qgso + qgb; qbulk = -qgb; qdrn = -qgdo; qsrc = -(qgate + qbulk + qdrn); } gcddb = (here->BSIM4capbd + cgdo) * ag0; gcdsb = gcsdb = 0.0; gcssb = (here->BSIM4capbs + cgso) * ag0; if (!here->BSIM4rbodyMod) { gcdbb = -(gcdgb + gcddb + gcdgmb); gcsbb = -(gcsgb + gcssb + gcsgmb); gcbdb = -here->BSIM4capbd * ag0; gcbsb = -here->BSIM4capbs * ag0; gcdbdb = 0.0; gcsbsb = 0.0; } else { gcdbb = gcsbb = gcbdb = gcbsb = 0.0; gcdbdb = -here->BSIM4capbd * ag0; gcsbsb = -here->BSIM4capbs * ag0; } gcbbb = -(gcbdb + gcbgb + gcbsb + gcbgmb); } } else { if (here->BSIM4trnqsMod == 0) { qsrc = qdrn - qgso; if (here->BSIM4rgateMod == 3) { gcgmgmb = (cgdo + cgso + pParam->BSIM4cgbo) * ag0; gcgmdb = -cgdo * ag0; gcgmsb = -cgso * ag0; gcgmbb = -pParam->BSIM4cgbo * ag0; gcdgmb = gcgmdb; gcsgmb = gcgmsb; gcbgmb = gcgmbb; gcggb = here->BSIM4cggb * ag0; gcgdb = here->BSIM4cgsb * ag0; gcgsb = here->BSIM4cgdb * ag0; gcgbb = -(gcggb + gcgdb + gcgsb); gcdgb = -(here->BSIM4cggb + here->BSIM4cbgb + here->BSIM4cdgb) * ag0; gcsgb = here->BSIM4cdgb * ag0; gcbgb = here->BSIM4cbgb * ag0; qgmb = pParam->BSIM4cgbo * vgmb; qgmid = qgdo + qgso + qgmb; qbulk -= qgmb; qdrn = -(qgate + qgmid + qbulk + qsrc); } else { gcggb = (here->BSIM4cggb + cgdo + cgso + pParam->BSIM4cgbo ) * ag0; gcgdb = (here->BSIM4cgsb - cgdo) * ag0; gcgsb = (here->BSIM4cgdb - cgso) * ag0; gcgbb = -(gcggb + gcgdb + gcgsb); gcdgb = -(here->BSIM4cggb + here->BSIM4cbgb + here->BSIM4cdgb + cgdo) * ag0; gcsgb = (here->BSIM4cdgb - cgso) * ag0; gcbgb = (here->BSIM4cbgb - pParam->BSIM4cgbo) * ag0; gcdgmb = gcsgmb 
= gcbgmb = 0.0; qgb = pParam->BSIM4cgbo * vgb; qgate += qgdo + qgso + qgb; qbulk -= qgb; qdrn = -(qgate + qbulk + qsrc); } gcddb = (here->BSIM4capbd + cgdo - (here->BSIM4cgsb + here->BSIM4cbsb + here->BSIM4cdsb)) * ag0; gcdsb = -(here->BSIM4cgdb + here->BSIM4cbdb + here->BSIM4cddb) * ag0; gcsdb = here->BSIM4cdsb * ag0; gcssb = (here->BSIM4cddb + here->BSIM4capbs + cgso) * ag0; if (!here->BSIM4rbodyMod) { gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb); gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb); gcbdb = (here->BSIM4cbsb - here->BSIM4capbd) * ag0; gcbsb = (here->BSIM4cbdb - here->BSIM4capbs) * ag0; gcdbdb = 0.0; gcsbsb = 0.0; } else { gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb) + here->BSIM4capbd * ag0; gcsbb = -(here->BSIM4cddb + here->BSIM4cdgb + here->BSIM4cdsb) * ag0; gcbdb = here->BSIM4cbsb * ag0; gcbsb = here->BSIM4cbdb * ag0; gcdbdb = -here->BSIM4capbd * ag0; gcsbsb = -here->BSIM4capbs * ag0; } gcbbb = -(gcbgb + gcbdb + gcbsb + gcbgmb); ggtg = ggtd = ggtb = ggts = 0.0; sxpart = 0.4; dxpart = 0.6; ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; } else { qcheq = here->BSIM4qchqs; CoxWL = model->BSIM4coxe * pParam->BSIM4weffCV * here->BSIM4nf * pParam->BSIM4leffCV; T0 = qdef * ScalingFactor / CoxWL; ggtg = here->BSIM4gtg = T0 * here->BSIM4gcrgg; ggts = here->BSIM4gts = T0 * here->BSIM4gcrgd; ggtd = here->BSIM4gtd = T0 * here->BSIM4gcrgs; ggtb = here->BSIM4gtb = T0 * here->BSIM4gcrgb; gqdef = ScalingFactor * ag0; gcqgb = here->BSIM4cqgb * ag0; gcqdb = here->BSIM4cqsb * ag0; gcqsb = here->BSIM4cqdb * ag0; gcqbb = here->BSIM4cqbb * ag0; if (fabs(qcheq) <= 1.0e-5 * CoxWL) { if (model->BSIM4xpart < 0.5) { sxpart = 0.4; } else if (model->BSIM4xpart > 0.5) { sxpart = 0.0; } else { sxpart = 0.5; } dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; } else { sxpart = qdrn / qcheq; Css = here->BSIM4cddb; Cds = -(here->BSIM4cgdb + here->BSIM4cddb + here->BSIM4cbdb); dsxpart_dVs = (Css - sxpart * (Css 
+ Cds)) / qcheq; Csg = here->BSIM4cdgb; Cdg = -(here->BSIM4cggb + here->BSIM4cdgb + here->BSIM4cbgb); dsxpart_dVg = (Csg - sxpart * (Csg + Cdg)) / qcheq; Csd = here->BSIM4cdsb; Cdd = -(here->BSIM4cgsb + here->BSIM4cdsb + here->BSIM4cbsb); dsxpart_dVd = (Csd - sxpart * (Csd + Cdd)) / qcheq; dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs); } dxpart = 1.0 - sxpart; ddxpart_dVd = -dsxpart_dVd; ddxpart_dVg = -dsxpart_dVg; ddxpart_dVs = -dsxpart_dVs; ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs); if (here->BSIM4rgateMod == 3) { gcgmgmb = (cgdo + cgso + pParam->BSIM4cgbo) * ag0; gcgmdb = -cgdo * ag0; gcgmsb = -cgso * ag0; gcgmbb = -pParam->BSIM4cgbo * ag0; gcdgmb = gcgmdb; gcsgmb = gcgmsb; gcbgmb = gcgmbb; gcdgb = gcsgb = gcbgb = 0.0; gcggb = gcgdb = gcgsb = gcgbb = 0.0; qgmb = pParam->BSIM4cgbo * vgmb; qgmid = qgdo + qgso + qgmb; qgate = 0.0; qbulk = -qgmb; qdrn = -qgdo; qsrc = -qgso; } else { gcggb = (cgdo + cgso + pParam->BSIM4cgbo ) * ag0; gcgdb = -cgdo * ag0; gcgsb = -cgso * ag0; gcgbb = -pParam->BSIM4cgbo * ag0; gcdgb = gcgdb; gcsgb = gcgsb; gcbgb = gcgbb; gcdgmb = gcsgmb = gcbgmb = 0.0; qgb = pParam->BSIM4cgbo * vgb; qgate = qgdo + qgso + qgb; qbulk = -qgb; qdrn = -qgdo; qsrc = -qgso; } gcddb = (here->BSIM4capbd + cgdo) * ag0; gcdsb = gcsdb = 0.0; gcssb = (here->BSIM4capbs + cgso) * ag0; if (!here->BSIM4rbodyMod) { gcdbb = -(gcdgb + gcddb + gcdgmb); gcsbb = -(gcsgb + gcssb + gcsgmb); gcbdb = -here->BSIM4capbd * ag0; gcbsb = -here->BSIM4capbs * ag0; gcdbdb = 0.0; gcsbsb = 0.0; } else { gcdbb = gcsbb = gcbdb = gcbsb = 0.0; gcdbdb = -here->BSIM4capbd * ag0; gcsbsb = -here->BSIM4capbs * ag0; } gcbbb = -(gcbdb + gcbgb + gcbsb + gcbgmb); } } if (here->BSIM4trnqsMod) { *(ckt->CKTstate0 + here->BSIM4qcdump) = qdef * ScalingFactor; if (ckt->CKTmode & MODEINITTRAN) *(ckt->CKTstate1 + here->BSIM4qcdump) = *(ckt->CKTstate0 + here->BSIM4qcdump); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qcdump); if (error) return(error); } if (ByPass) goto 
line860; *(ckt->CKTstate0 + here->BSIM4qg) = qgate; *(ckt->CKTstate0 + here->BSIM4qd) = qdrn - *(ckt->CKTstate0 + here->BSIM4qbd); *(ckt->CKTstate0 + here->BSIM4qs) = qsrc - *(ckt->CKTstate0 + here->BSIM4qbs); if (here->BSIM4rgateMod == 3) *(ckt->CKTstate0 + here->BSIM4qgmid) = qgmid; if (!here->BSIM4rbodyMod) { *(ckt->CKTstate0 + here->BSIM4qb) = qbulk + *(ckt->CKTstate0 + here->BSIM4qbd) + *(ckt->CKTstate0 + here->BSIM4qbs); } else *(ckt->CKTstate0 + here->BSIM4qb) = qbulk; /* Store small signal parameters */ if (ckt->CKTmode & MODEINITSMSIG) { goto line1000; } if (!ChargeComputationNeeded) goto line850; if (ckt->CKTmode & MODEINITTRAN) { *(ckt->CKTstate1 + here->BSIM4qb) = *(ckt->CKTstate0 + here->BSIM4qb); *(ckt->CKTstate1 + here->BSIM4qg) = *(ckt->CKTstate0 + here->BSIM4qg); *(ckt->CKTstate1 + here->BSIM4qd) = *(ckt->CKTstate0 + here->BSIM4qd); if (here->BSIM4rgateMod == 3) *(ckt->CKTstate1 + here->BSIM4qgmid) = *(ckt->CKTstate0 + here->BSIM4qgmid); if (here->BSIM4rbodyMod) { *(ckt->CKTstate1 + here->BSIM4qbs) = *(ckt->CKTstate0 + here->BSIM4qbs); *(ckt->CKTstate1 + here->BSIM4qbd) = *(ckt->CKTstate0 + here->BSIM4qbd); } } error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qb); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qg); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qd); if (error) return(error); if (here->BSIM4rgateMod == 3) { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qgmid); if (error) return(error); } if (here->BSIM4rbodyMod) { error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qbs); if (error) return(error); error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4qbd); if (error) return(error); } goto line860; line850: /* Zero gcap and ceqcap if (!ChargeComputationNeeded) */ ceqqg = ceqqb = ceqqd = 0.0; ceqqjd = ceqqjs = 0.0; cqcheq = cqdef = 0.0; gcdgb = gcddb = gcdsb = gcdbb = 0.0; gcsgb = gcsdb = gcssb = gcsbb = 0.0; gcggb = gcgdb = gcgsb = gcgbb = 0.0; gcbdb = gcbgb = 
gcbsb = gcbbb = 0.0; gcgmgmb = gcgmdb = gcgmsb = gcgmbb = 0.0; gcdgmb = gcsgmb = gcbgmb = ceqqgmid = 0.0; gcdbdb = gcsbsb = 0.0; gqdef = gcqgb = gcqdb = gcqsb = gcqbb = 0.0; ggtg = ggtd = ggtb = ggts = 0.0; sxpart = (1.0 - (dxpart = (here->BSIM4mode > 0) ? 0.4 : 0.6)); ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0; dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0; if (here->BSIM4trnqsMod) { CoxWL = model->BSIM4coxe * pParam->BSIM4weffCV * here->BSIM4nf * pParam->BSIM4leffCV; T1 = here->BSIM4gcrg / CoxWL; here->BSIM4gtau = T1 * ScalingFactor; } else here->BSIM4gtau = 0.0; goto line900; line860: /* Calculate equivalent charge current */ cqgate = *(ckt->CKTstate0 + here->BSIM4cqg); cqbody = *(ckt->CKTstate0 + here->BSIM4cqb); cqdrn = *(ckt->CKTstate0 + here->BSIM4cqd); ceqqg = cqgate - gcggb * vgb + gcgdb * vbd + gcgsb * vbs; ceqqd = cqdrn - gcdgb * vgb - gcdgmb * vgmb + (gcddb + gcdbdb) * vbd - gcdbdb * vbd_jct + gcdsb * vbs; ceqqb = cqbody - gcbgb * vgb - gcbgmb * vgmb + gcbdb * vbd + gcbsb * vbs; if (here->BSIM4rgateMod == 3) ceqqgmid = *(ckt->CKTstate0 + here->BSIM4cqgmid) + gcgmdb * vbd + gcgmsb * vbs - gcgmgmb * vgmb; else ceqqgmid = 0.0; if (here->BSIM4rbodyMod) { ceqqjs = *(ckt->CKTstate0 + here->BSIM4cqbs) + gcsbsb * vbs_jct; ceqqjd = *(ckt->CKTstate0 + here->BSIM4cqbd) + gcdbdb * vbd_jct; } if (here->BSIM4trnqsMod) { T0 = ggtg * vgb - ggtd * vbd - ggts * vbs; ceqqg += T0; T1 = qdef * here->BSIM4gtau; ceqqd -= dxpart * T0 + T1 * (ddxpart_dVg * vgb - ddxpart_dVd * vbd - ddxpart_dVs * vbs); cqdef = *(ckt->CKTstate0 + here->BSIM4cqcdump) - gqdef * qdef; cqcheq = *(ckt->CKTstate0 + here->BSIM4cqcheq) - (gcqgb * vgb - gcqdb * vbd - gcqsb * vbs) + T0; } if (ckt->CKTmode & MODEINITTRAN) { *(ckt->CKTstate1 + here->BSIM4cqb) = *(ckt->CKTstate0 + here->BSIM4cqb); *(ckt->CKTstate1 + here->BSIM4cqg) = *(ckt->CKTstate0 + here->BSIM4cqg); *(ckt->CKTstate1 + here->BSIM4cqd) = *(ckt->CKTstate0 + here->BSIM4cqd); if (here->BSIM4rgateMod == 3) 
*(ckt->CKTstate1 + here->BSIM4cqgmid) = *(ckt->CKTstate0 + here->BSIM4cqgmid); if (here->BSIM4rbodyMod) { *(ckt->CKTstate1 + here->BSIM4cqbs) = *(ckt->CKTstate0 + here->BSIM4cqbs); *(ckt->CKTstate1 + here->BSIM4cqbd) = *(ckt->CKTstate0 + here->BSIM4cqbd); } } /* * Load current vector */ line900: if (here->BSIM4mode >= 0) { Gm = here->BSIM4gm; Gmbs = here->BSIM4gmbs; FwdSum = Gm + Gmbs; RevSum = 0.0; ceqdrn = model->BSIM4type * (cdrain - here->BSIM4gds * vds - Gm * vgs - Gmbs * vbs); ceqbd = model->BSIM4type * (here->BSIM4csub + here->BSIM4Igidl - (here->BSIM4gbds + here->BSIM4ggidld) * vds - (here->BSIM4gbgs + here->BSIM4ggidlg) * vgs - (here->BSIM4gbbs + here->BSIM4ggidlb) * vbs); ceqbs = model->BSIM4type * (here->BSIM4Igisl + here->BSIM4ggisls * vds - here->BSIM4ggislg * vgd - here->BSIM4ggislb * vbd); gbbdp = -(here->BSIM4gbds); gbbsp = here->BSIM4gbds + here->BSIM4gbgs + here->BSIM4gbbs; gbdpg = here->BSIM4gbgs; gbdpdp = here->BSIM4gbds; gbdpb = here->BSIM4gbbs; gbdpsp = -(gbdpg + gbdpdp + gbdpb); gbspg = 0.0; gbspdp = 0.0; gbspb = 0.0; gbspsp = 0.0; if (model->BSIM4igcMod) { gIstotg = here->BSIM4gIgsg + here->BSIM4gIgcsg; gIstotd = here->BSIM4gIgcsd; gIstots = here->BSIM4gIgss + here->BSIM4gIgcss; gIstotb = here->BSIM4gIgcsb; Istoteq = model->BSIM4type * (here->BSIM4Igs + here->BSIM4Igcs - gIstotg * vgs - here->BSIM4gIgcsd * vds - here->BSIM4gIgcsb * vbs); gIdtotg = here->BSIM4gIgdg + here->BSIM4gIgcdg; gIdtotd = here->BSIM4gIgdd + here->BSIM4gIgcdd; gIdtots = here->BSIM4gIgcds; gIdtotb = here->BSIM4gIgcdb; Idtoteq = model->BSIM4type * (here->BSIM4Igd + here->BSIM4Igcd - here->BSIM4gIgdg * vgd - here->BSIM4gIgcdg * vgs - here->BSIM4gIgcdd * vds - here->BSIM4gIgcdb * vbs); } else { gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0; gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0; } if (model->BSIM4igbMod) { gIbtotg = here->BSIM4gIgbg; gIbtotd = here->BSIM4gIgbd; gIbtots = here->BSIM4gIgbs; gIbtotb = here->BSIM4gIgbb; Ibtoteq = model->BSIM4type * 
(here->BSIM4Igb - here->BSIM4gIgbg * vgs - here->BSIM4gIgbd * vds - here->BSIM4gIgbb * vbs); } else gIbtotg = gIbtotd = gIbtots = gIbtotb = Ibtoteq = 0.0; if ((model->BSIM4igcMod != 0) || (model->BSIM4igbMod != 0)) { gIgtotg = gIstotg + gIdtotg + gIbtotg; gIgtotd = gIstotd + gIdtotd + gIbtotd ; gIgtots = gIstots + gIdtots + gIbtots; gIgtotb = gIstotb + gIdtotb + gIbtotb; Igtoteq = Istoteq + Idtoteq + Ibtoteq; } else gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0; if (here->BSIM4rgateMod == 2) T0 = vges - vgs; else if (here->BSIM4rgateMod == 3) T0 = vgms - vgs; if (here->BSIM4rgateMod > 1) { gcrgd = here->BSIM4gcrgd * T0; gcrgg = here->BSIM4gcrgg * T0; gcrgs = here->BSIM4gcrgs * T0; gcrgb = here->BSIM4gcrgb * T0; ceqgcrg = -(gcrgd * vds + gcrgg * vgs + gcrgb * vbs); gcrgg -= here->BSIM4gcrg; gcrg = here->BSIM4gcrg; } else ceqgcrg = gcrg = gcrgd = gcrgg = gcrgs = gcrgb = 0.0; } else { Gm = -here->BSIM4gm; Gmbs = -here->BSIM4gmbs; FwdSum = 0.0; RevSum = -(Gm + Gmbs); ceqdrn = -model->BSIM4type * (cdrain + here->BSIM4gds * vds + Gm * vgd + Gmbs * vbd); ceqbs = model->BSIM4type * (here->BSIM4csub + here->BSIM4Igisl + (here->BSIM4gbds + here->BSIM4ggisls) * vds - (here->BSIM4gbgs + here->BSIM4ggislg) * vgd - (here->BSIM4gbbs + here->BSIM4ggislb) * vbd); ceqbd = model->BSIM4type * (here->BSIM4Igidl - here->BSIM4ggidld * vds - here->BSIM4ggidlg * vgs - here->BSIM4ggidlb * vbs); gbbsp = -(here->BSIM4gbds); gbbdp = here->BSIM4gbds + here->BSIM4gbgs + here->BSIM4gbbs; gbdpg = 0.0; gbdpsp = 0.0; gbdpb = 0.0; gbdpdp = 0.0; gbspg = here->BSIM4gbgs; gbspsp = here->BSIM4gbds; gbspb = here->BSIM4gbbs; gbspdp = -(gbspg + gbspsp + gbspb); if (model->BSIM4igcMod) { gIstotg = here->BSIM4gIgsg + here->BSIM4gIgcdg; gIstotd = here->BSIM4gIgcds; gIstots = here->BSIM4gIgss + here->BSIM4gIgcdd; gIstotb = here->BSIM4gIgcdb; Istoteq = model->BSIM4type * (here->BSIM4Igs + here->BSIM4Igcd - here->BSIM4gIgsg * vgs - here->BSIM4gIgcdg * vgd + here->BSIM4gIgcdd * vds - here->BSIM4gIgcdb * 
vbd); gIdtotg = here->BSIM4gIgdg + here->BSIM4gIgcsg; gIdtotd = here->BSIM4gIgdd + here->BSIM4gIgcss; gIdtots = here->BSIM4gIgcsd; gIdtotb = here->BSIM4gIgcsb; Idtoteq = model->BSIM4type * (here->BSIM4Igd + here->BSIM4Igcs - (here->BSIM4gIgdg + here->BSIM4gIgcsg) * vgd + here->BSIM4gIgcsd * vds - here->BSIM4gIgcsb * vbd); } else { gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0; gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0; } if (model->BSIM4igbMod) { gIbtotg = here->BSIM4gIgbg; gIbtotd = here->BSIM4gIgbs; gIbtots = here->BSIM4gIgbd; gIbtotb = here->BSIM4gIgbb; Ibtoteq = model->BSIM4type * (here->BSIM4Igb - here->BSIM4gIgbg * vgd + here->BSIM4gIgbd * vds - here->BSIM4gIgbb * vbd); } else gIbtotg = gIbtotd = gIbtots = gIbtotb = Ibtoteq = 0.0; if ((model->BSIM4igcMod != 0) || (model->BSIM4igbMod != 0)) { gIgtotg = gIstotg + gIdtotg + gIbtotg; gIgtotd = gIstotd + gIdtotd + gIbtotd ; gIgtots = gIstots + gIdtots + gIbtots; gIgtotb = gIstotb + gIdtotb + gIbtotb; Igtoteq = Istoteq + Idtoteq + Ibtoteq; } else gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0; if (here->BSIM4rgateMod == 2) T0 = vges - vgs; else if (here->BSIM4rgateMod == 3) T0 = vgms - vgs; if (here->BSIM4rgateMod > 1) { gcrgd = here->BSIM4gcrgs * T0; gcrgg = here->BSIM4gcrgg * T0; gcrgs = here->BSIM4gcrgd * T0; gcrgb = here->BSIM4gcrgb * T0; ceqgcrg = -(gcrgg * vgd - gcrgs * vds + gcrgb * vbd); gcrgg -= here->BSIM4gcrg; gcrg = here->BSIM4gcrg; } else ceqgcrg = gcrg = gcrgd = gcrgg = gcrgs = gcrgb = 0.0; } if (model->BSIM4rdsMod == 1) { ceqgstot = model->BSIM4type * (here->BSIM4gstotd * vds + here->BSIM4gstotg * vgs + here->BSIM4gstotb * vbs); /* WDLiu: ceqgstot flowing away from sNodePrime */ gstot = here->BSIM4gstot; gstotd = here->BSIM4gstotd; gstotg = here->BSIM4gstotg; gstots = here->BSIM4gstots - gstot; gstotb = here->BSIM4gstotb; ceqgdtot = -model->BSIM4type * (here->BSIM4gdtotd * vds + here->BSIM4gdtotg * vgs + here->BSIM4gdtotb * vbs); /* WDLiu: ceqgdtot defined as flowing into 
dNodePrime */ gdtot = here->BSIM4gdtot; gdtotd = here->BSIM4gdtotd - gdtot; gdtotg = here->BSIM4gdtotg; gdtots = here->BSIM4gdtots; gdtotb = here->BSIM4gdtotb; } else { gstot = gstotd = gstotg = gstots = gstotb = ceqgstot = 0.0; gdtot = gdtotd = gdtotg = gdtots = gdtotb = ceqgdtot = 0.0; } if (model->BSIM4type > 0) { ceqjs = (here->BSIM4cbs - here->BSIM4gbs * vbs_jct); ceqjd = (here->BSIM4cbd - here->BSIM4gbd * vbd_jct); } else { ceqjs = -(here->BSIM4cbs - here->BSIM4gbs * vbs_jct); ceqjd = -(here->BSIM4cbd - here->BSIM4gbd * vbd_jct); ceqqg = -ceqqg; ceqqd = -ceqqd; ceqqb = -ceqqb; ceqgcrg = -ceqgcrg; if (here->BSIM4trnqsMod) { cqdef = -cqdef; cqcheq = -cqcheq; } if (here->BSIM4rbodyMod) { ceqqjs = -ceqqjs; ceqqjd = -ceqqjd; } if (here->BSIM4rgateMod == 3) ceqqgmid = -ceqqgmid; } /* * Loading RHS */ m = here->BSIM4m; #ifdef USE_OMP here->BSIM4rhsdPrime = m * (ceqjd - ceqbd + ceqgdtot - ceqdrn - ceqqd + Idtoteq); here->BSIM4rhsgPrime = m * (ceqqg - ceqgcrg + Igtoteq); if (here->BSIM4rgateMod == 2) here->BSIM4rhsgExt = m * ceqgcrg; else if (here->BSIM4rgateMod == 3) here->BSIM4grhsMid = m * (ceqqgmid + ceqgcrg); if (!here->BSIM4rbodyMod) { here->BSIM4rhsbPrime = m * (ceqbd + ceqbs - ceqjd - ceqjs - ceqqb + Ibtoteq); here->BSIM4rhssPrime = m * (ceqdrn - ceqbs + ceqjs + ceqqg + ceqqb + ceqqd + ceqqgmid - ceqgstot + Istoteq); } else { here->BSIM4rhsdb = m * (ceqjd + ceqqjd); here->BSIM4rhsbPrime = m * (ceqbd + ceqbs - ceqqb + Ibtoteq); here->BSIM4rhssb = m * (ceqjs + ceqqjs); here->BSIM4rhssPrime = m * (ceqdrn - ceqbs + ceqjs + ceqqd + ceqqg + ceqqb + ceqqjd + ceqqjs + ceqqgmid - ceqgstot + Istoteq); } if (model->BSIM4rdsMod) { here->BSIM4rhsd = m * ceqgdtot; here->BSIM4rhss = m * ceqgstot; } if (here->BSIM4trnqsMod) here->BSIM4rhsq = m * (cqcheq - cqdef); #else (*(ckt->CKTrhs + here->BSIM4dNodePrime) += m * (ceqjd - ceqbd + ceqgdtot - ceqdrn - ceqqd + Idtoteq)); (*(ckt->CKTrhs + here->BSIM4gNodePrime) -= m * (ceqqg - ceqgcrg + Igtoteq)); if (here->BSIM4rgateMod == 2) 
(*(ckt->CKTrhs + here->BSIM4gNodeExt) -= m * ceqgcrg); else if (here->BSIM4rgateMod == 3) (*(ckt->CKTrhs + here->BSIM4gNodeMid) -= m * (ceqqgmid + ceqgcrg)); if (!here->BSIM4rbodyMod) { (*(ckt->CKTrhs + here->BSIM4bNodePrime) += m * (ceqbd + ceqbs - ceqjd - ceqjs - ceqqb + Ibtoteq)); (*(ckt->CKTrhs + here->BSIM4sNodePrime) += m * (ceqdrn - ceqbs + ceqjs + ceqqg + ceqqb + ceqqd + ceqqgmid - ceqgstot + Istoteq)); } else { (*(ckt->CKTrhs + here->BSIM4dbNode) -= m * (ceqjd + ceqqjd)); (*(ckt->CKTrhs + here->BSIM4bNodePrime) += m * (ceqbd + ceqbs - ceqqb + Ibtoteq)); (*(ckt->CKTrhs + here->BSIM4sbNode) -= m * (ceqjs + ceqqjs)); (*(ckt->CKTrhs + here->BSIM4sNodePrime) += m * (ceqdrn - ceqbs + ceqjs + ceqqd + ceqqg + ceqqb + ceqqjd + ceqqjs + ceqqgmid - ceqgstot + Istoteq)); } if (model->BSIM4rdsMod) { (*(ckt->CKTrhs + here->BSIM4dNode) -= m * ceqgdtot); (*(ckt->CKTrhs + here->BSIM4sNode) += m * ceqgstot); } if (here->BSIM4trnqsMod) *(ckt->CKTrhs + here->BSIM4qNode) += m * (cqcheq - cqdef); #endif /* * Loading matrix */ if (!here->BSIM4rbodyMod) { gjbd = here->BSIM4gbd; gjbs = here->BSIM4gbs; } else gjbd = gjbs = 0.0; if (!model->BSIM4rdsMod) { gdpr = here->BSIM4drainConductance; gspr = here->BSIM4sourceConductance; } else gdpr = gspr = 0.0; geltd = here->BSIM4grgeltd; T1 = qdef * here->BSIM4gtau; #ifdef USE_OMP if (here->BSIM4rgateMod == 1) { here->BSIM4_1 = m * geltd; here->BSIM4_2 = m * geltd; here->BSIM4_3 = m * geltd; here->BSIM4_4 = m * (gcggb + geltd - ggtg + gIgtotg); here->BSIM4_5 = m * (gcgdb - ggtd + gIgtotd); here->BSIM4_6 = m * (gcgsb - ggts + gIgtots); here->BSIM4_7 = m * (gcgbb - ggtb + gIgtotb); } /* WDLiu: gcrg already subtracted from all gcrgg below */ else if (here->BSIM4rgateMod == 2) { here->BSIM4_8 = m * gcrg; here->BSIM4_9 = m * gcrgg; here->BSIM4_10 = m * gcrgd; here->BSIM4_11 = m * gcrgs; here->BSIM4_12 = m * gcrgb; here->BSIM4_13 = m * gcrg; here->BSIM4_14 = m * (gcggb - gcrgg - ggtg + gIgtotg); here->BSIM4_15 = m * (gcgdb - gcrgd - ggtd + 
gIgtotd); here->BSIM4_16 = m * (gcgsb - gcrgs - ggts + gIgtots); here->BSIM4_17 = m * (gcgbb - gcrgb - ggtb + gIgtotb); } else if (here->BSIM4rgateMod == 3) { here->BSIM4_18 = m * geltd; here->BSIM4_19 = m * geltd; here->BSIM4_20 = m * geltd; here->BSIM4_21 = m * (geltd + gcrg + gcgmgmb); here->BSIM4_22 = m * (gcrgd + gcgmdb); here->BSIM4_23 = m * gcrgg; here->BSIM4_24 = m * (gcrgs + gcgmsb); here->BSIM4_25 = m * (gcrgb + gcgmbb); here->BSIM4_26 = m * gcdgmb; here->BSIM4_27 = m * gcrg; here->BSIM4_28 = m * gcsgmb; here->BSIM4_29 = m * gcbgmb; here->BSIM4_30 = m * (gcggb - gcrgg - ggtg + gIgtotg); here->BSIM4_31 = m * (gcgdb - gcrgd - ggtd + gIgtotd); here->BSIM4_32 = m * (gcgsb - gcrgs - ggts + gIgtots); here->BSIM4_33 = m * (gcgbb - gcrgb - ggtb + gIgtotb); } else { here->BSIM4_34 = m * (gcggb - ggtg + gIgtotg); here->BSIM4_35 = m * (gcgdb - ggtd + gIgtotd); here->BSIM4_36 = m * (gcgsb - ggts + gIgtots); here->BSIM4_37 = m * (gcgbb - ggtb + gIgtotb); } if (model->BSIM4rdsMod) { here->BSIM4_38 = m * gdtotg; here->BSIM4_39 = m * gdtots; here->BSIM4_40 = m * gdtotb; here->BSIM4_41 = m * gstotd; here->BSIM4_42 = m * gstotg; here->BSIM4_43 = m * gstotb; } here->BSIM4_44 = m * (gdpr + here->BSIM4gds + here->BSIM4gbd + T1 * ddxpart_dVd - gdtotd + RevSum + gcddb + gbdpdp + dxpart * ggtd - gIdtotd); here->BSIM4_45 = m * (gdpr + gdtot); here->BSIM4_46 = m * (Gm + gcdgb - gdtotg + gbdpg - gIdtotg + dxpart * ggtg + T1 * ddxpart_dVg); here->BSIM4_47 = m * (here->BSIM4gds + gdtots - dxpart * ggts + gIdtots - T1 * ddxpart_dVs + FwdSum - gcdsb - gbdpsp); here->BSIM4_48 = m * (gjbd + gdtotb - Gmbs - gcdbb - gbdpb + gIdtotb - T1 * ddxpart_dVb - dxpart * ggtb); here->BSIM4_49 = m * (gdpr - gdtotd); here->BSIM4_50 = m * (gdpr + gdtot); here->BSIM4_51 = m * (here->BSIM4gds + gstotd + RevSum - gcsdb - gbspdp - T1 * dsxpart_dVd - sxpart * ggtd + gIstotd); here->BSIM4_52 = m * (gcsgb - Gm - gstotg + gbspg + sxpart * ggtg + T1 * dsxpart_dVg - gIstotg); here->BSIM4_53 = m * (gspr + 
here->BSIM4gds + here->BSIM4gbs + T1 * dsxpart_dVs - gstots + FwdSum + gcssb + gbspsp + sxpart * ggts - gIstots); here->BSIM4_54 = m * (gspr + gstot); here->BSIM4_55 = m * (gjbs + gstotb + Gmbs - gcsbb - gbspb - sxpart * ggtb - T1 * dsxpart_dVb + gIstotb); here->BSIM4_56 = m * (gspr - gstots); here->BSIM4_57 = m * (gspr + gstot); here->BSIM4_58 = m * (gcbdb - gjbd + gbbdp - gIbtotd); here->BSIM4_59 = m * (gcbgb - here->BSIM4gbgs - gIbtotg); here->BSIM4_60 = m * (gcbsb - gjbs + gbbsp - gIbtots); here->BSIM4_61 = m * (gjbd + gjbs + gcbbb - here->BSIM4gbbs - gIbtotb); ggidld = here->BSIM4ggidld; ggidlg = here->BSIM4ggidlg; ggidlb = here->BSIM4ggidlb; ggislg = here->BSIM4ggislg; ggisls = here->BSIM4ggisls; ggislb = here->BSIM4ggislb; /* stamp gidl */ here->BSIM4_62 = m * ggidld; here->BSIM4_63 = m * ggidlg; here->BSIM4_64 = m * (ggidlg + ggidld + ggidlb); here->BSIM4_65 = m * ggidlb; here->BSIM4_66 = m * ggidld; here->BSIM4_67 = m * ggidlg; here->BSIM4_68 = m * (ggidlg + ggidld + ggidlb); here->BSIM4_69 = m * ggidlb; /* stamp gisl */ here->BSIM4_70 = m * (ggisls + ggislg + ggislb); here->BSIM4_71 = m * ggislg; here->BSIM4_72 = m * ggisls; here->BSIM4_73 = m * ggislb; here->BSIM4_74 = m * (ggislg + ggisls + ggislb); here->BSIM4_75 = m * ggislg; here->BSIM4_76 = m * ggisls; here->BSIM4_77 = m * ggislb; if (here->BSIM4rbodyMod) { here->BSIM4_78 = m * (gcdbdb - here->BSIM4gbd); here->BSIM4_79 = m * (here->BSIM4gbs - gcsbsb); here->BSIM4_80 = m * (gcdbdb - here->BSIM4gbd); here->BSIM4_81 = m * (here->BSIM4gbd - gcdbdb + here->BSIM4grbpd + here->BSIM4grbdb); here->BSIM4_82 = m * here->BSIM4grbpd; here->BSIM4_83 = m * here->BSIM4grbdb; here->BSIM4_84 = m * here->BSIM4grbpd; here->BSIM4_85 = m * here->BSIM4grbpb; here->BSIM4_86 = m * here->BSIM4grbps; here->BSIM4_87 = m * (here->BSIM4grbpd + here->BSIM4grbps + here->BSIM4grbpb); /* WDLiu: (gcbbb - here->BSIM4gbbs) already added to BPbpPtr */ here->BSIM4_88 = m * (gcsbsb - here->BSIM4gbs); here->BSIM4_89 = m * here->BSIM4grbps; 
here->BSIM4_90 = m * here->BSIM4grbsb; here->BSIM4_91 = m * (here->BSIM4gbs - gcsbsb + here->BSIM4grbps + here->BSIM4grbsb); here->BSIM4_92 = m * here->BSIM4grbdb; here->BSIM4_93 = m * here->BSIM4grbpb; here->BSIM4_94 = m * here->BSIM4grbsb; here->BSIM4_95 = m * (here->BSIM4grbsb + here->BSIM4grbdb + here->BSIM4grbpb); } if (here->BSIM4trnqsMod) { here->BSIM4_96 = m * (gqdef + here->BSIM4gtau); here->BSIM4_97 = m * (ggtg - gcqgb); here->BSIM4_98 = m * (ggtd - gcqdb); here->BSIM4_99 = m * (ggts - gcqsb); here->BSIM4_100 = m * (ggtb - gcqbb); here->BSIM4_101 = m * dxpart * here->BSIM4gtau; here->BSIM4_102 = m * sxpart * here->BSIM4gtau; here->BSIM4_103 = m * here->BSIM4gtau; } #else if (here->BSIM4rgateMod == 1) { (*(here->BSIM4GEgePtr) += m * geltd); (*(here->BSIM4GPgePtr) -= m * geltd); (*(here->BSIM4GEgpPtr) -= m * geltd); (*(here->BSIM4GPgpPtr) += m * (gcggb + geltd - ggtg + gIgtotg)); (*(here->BSIM4GPdpPtr) += m * (gcgdb - ggtd + gIgtotd)); (*(here->BSIM4GPspPtr) += m * (gcgsb - ggts + gIgtots)); (*(here->BSIM4GPbpPtr) += m * (gcgbb - ggtb + gIgtotb)); } /* WDLiu: gcrg already subtracted from all gcrgg below */ else if (here->BSIM4rgateMod == 2) { (*(here->BSIM4GEgePtr) += m * gcrg); (*(here->BSIM4GEgpPtr) += m * gcrgg); (*(here->BSIM4GEdpPtr) += m * gcrgd); (*(here->BSIM4GEspPtr) += m * gcrgs); (*(here->BSIM4GEbpPtr) += m * gcrgb); (*(here->BSIM4GPgePtr) -= m * gcrg); (*(here->BSIM4GPgpPtr) += m * (gcggb - gcrgg - ggtg + gIgtotg)); (*(here->BSIM4GPdpPtr) += m * (gcgdb - gcrgd - ggtd + gIgtotd)); (*(here->BSIM4GPspPtr) += m * (gcgsb - gcrgs - ggts + gIgtots)); (*(here->BSIM4GPbpPtr) += m * (gcgbb - gcrgb - ggtb + gIgtotb)); } else if (here->BSIM4rgateMod == 3) { (*(here->BSIM4GEgePtr) += m * geltd); (*(here->BSIM4GEgmPtr) -= m * geltd); (*(here->BSIM4GMgePtr) -= m * geltd); (*(here->BSIM4GMgmPtr) += m * (geltd + gcrg + gcgmgmb)); (*(here->BSIM4GMdpPtr) += m * (gcrgd + gcgmdb)); (*(here->BSIM4GMgpPtr) += m * gcrgg); (*(here->BSIM4GMspPtr) += m * (gcrgs + 
gcgmsb)); (*(here->BSIM4GMbpPtr) += m * (gcrgb + gcgmbb)); (*(here->BSIM4DPgmPtr) += m * gcdgmb); (*(here->BSIM4GPgmPtr) -= m * gcrg); (*(here->BSIM4SPgmPtr) += m * gcsgmb); (*(here->BSIM4BPgmPtr) += m * gcbgmb); (*(here->BSIM4GPgpPtr) += m * (gcggb - gcrgg - ggtg + gIgtotg)); (*(here->BSIM4GPdpPtr) += m * (gcgdb - gcrgd - ggtd + gIgtotd)); (*(here->BSIM4GPspPtr) += m * (gcgsb - gcrgs - ggts + gIgtots)); (*(here->BSIM4GPbpPtr) += m * (gcgbb - gcrgb - ggtb + gIgtotb)); } else { (*(here->BSIM4GPgpPtr) += m * (gcggb - ggtg + gIgtotg)); (*(here->BSIM4GPdpPtr) += m * (gcgdb - ggtd + gIgtotd)); (*(here->BSIM4GPspPtr) += m * (gcgsb - ggts + gIgtots)); (*(here->BSIM4GPbpPtr) += m * (gcgbb - ggtb + gIgtotb)); } if (model->BSIM4rdsMod) { (*(here->BSIM4DgpPtr) += m * gdtotg); (*(here->BSIM4DspPtr) += m * gdtots); (*(here->BSIM4DbpPtr) += m * gdtotb); (*(here->BSIM4SdpPtr) += m * gstotd); (*(here->BSIM4SgpPtr) += m * gstotg); (*(here->BSIM4SbpPtr) += m * gstotb); } (*(here->BSIM4DPdpPtr) += m * (gdpr + here->BSIM4gds + here->BSIM4gbd + T1 * ddxpart_dVd - gdtotd + RevSum + gcddb + gbdpdp + dxpart * ggtd - gIdtotd)); (*(here->BSIM4DPdPtr) -= m * (gdpr + gdtot)); (*(here->BSIM4DPgpPtr) += m * (Gm + gcdgb - gdtotg + gbdpg - gIdtotg + dxpart * ggtg + T1 * ddxpart_dVg)); (*(here->BSIM4DPspPtr) -= m * (here->BSIM4gds + gdtots - dxpart * ggts + gIdtots - T1 * ddxpart_dVs + FwdSum - gcdsb - gbdpsp)); (*(here->BSIM4DPbpPtr) -= m * (gjbd + gdtotb - Gmbs - gcdbb - gbdpb + gIdtotb - T1 * ddxpart_dVb - dxpart * ggtb)); (*(here->BSIM4DdpPtr) -= m * (gdpr - gdtotd)); (*(here->BSIM4DdPtr) += m * (gdpr + gdtot)); (*(here->BSIM4SPdpPtr) -= m * (here->BSIM4gds + gstotd + RevSum - gcsdb - gbspdp - T1 * dsxpart_dVd - sxpart * ggtd + gIstotd)); (*(here->BSIM4SPgpPtr) += m * (gcsgb - Gm - gstotg + gbspg + sxpart * ggtg + T1 * dsxpart_dVg - gIstotg)); (*(here->BSIM4SPspPtr) += m * (gspr + here->BSIM4gds + here->BSIM4gbs + T1 * dsxpart_dVs - gstots + FwdSum + gcssb + gbspsp + sxpart * ggts - gIstots)); 
(*(here->BSIM4SPsPtr) -= m * (gspr + gstot)); (*(here->BSIM4SPbpPtr) -= m * (gjbs + gstotb + Gmbs - gcsbb - gbspb - sxpart * ggtb - T1 * dsxpart_dVb + gIstotb)); (*(here->BSIM4SspPtr) -= m * (gspr - gstots)); (*(here->BSIM4SsPtr) += m * (gspr + gstot)); (*(here->BSIM4BPdpPtr) += m * (gcbdb - gjbd + gbbdp - gIbtotd)); (*(here->BSIM4BPgpPtr) += m * (gcbgb - here->BSIM4gbgs - gIbtotg)); (*(here->BSIM4BPspPtr) += m * (gcbsb - gjbs + gbbsp - gIbtots)); (*(here->BSIM4BPbpPtr) += m * (gjbd + gjbs + gcbbb - here->BSIM4gbbs - gIbtotb)); ggidld = here->BSIM4ggidld; ggidlg = here->BSIM4ggidlg; ggidlb = here->BSIM4ggidlb; ggislg = here->BSIM4ggislg; ggisls = here->BSIM4ggisls; ggislb = here->BSIM4ggislb; /* stamp gidl */ (*(here->BSIM4DPdpPtr) += m * ggidld); (*(here->BSIM4DPgpPtr) += m * ggidlg); (*(here->BSIM4DPspPtr) -= m * (ggidlg + ggidld + ggidlb)); (*(here->BSIM4DPbpPtr) += m * ggidlb); (*(here->BSIM4BPdpPtr) -= m * ggidld); (*(here->BSIM4BPgpPtr) -= m * ggidlg); (*(here->BSIM4BPspPtr) += m * (ggidlg + ggidld + ggidlb)); (*(here->BSIM4BPbpPtr) -= m * ggidlb); /* stamp gisl */ (*(here->BSIM4SPdpPtr) -= m * (ggisls + ggislg + ggislb)); (*(here->BSIM4SPgpPtr) += m * ggislg); (*(here->BSIM4SPspPtr) += m * ggisls); (*(here->BSIM4SPbpPtr) += m * ggislb); (*(here->BSIM4BPdpPtr) += m * (ggislg + ggisls + ggislb)); (*(here->BSIM4BPgpPtr) -= m * ggislg); (*(here->BSIM4BPspPtr) -= m * ggisls); (*(here->BSIM4BPbpPtr) -= m * ggislb); if (here->BSIM4rbodyMod) { (*(here->BSIM4DPdbPtr) += m * (gcdbdb - here->BSIM4gbd)); (*(here->BSIM4SPsbPtr) -= m * (here->BSIM4gbs - gcsbsb)); (*(here->BSIM4DBdpPtr) += m * (gcdbdb - here->BSIM4gbd)); (*(here->BSIM4DBdbPtr) += m * (here->BSIM4gbd - gcdbdb + here->BSIM4grbpd + here->BSIM4grbdb)); (*(here->BSIM4DBbpPtr) -= m * here->BSIM4grbpd); (*(here->BSIM4DBbPtr) -= m * here->BSIM4grbdb); (*(here->BSIM4BPdbPtr) -= m * here->BSIM4grbpd); (*(here->BSIM4BPbPtr) -= m * here->BSIM4grbpb); (*(here->BSIM4BPsbPtr) -= m * here->BSIM4grbps); 
/* (continuation of the BSIM4 load routine) remaining substrate-resistance
   (rbodyMod) Jacobian stamps for the body-network nodes. */
(*(here->BSIM4BPbpPtr) += m * (here->BSIM4grbpd + here->BSIM4grbps + here->BSIM4grbpb));
/* WDLiu: (gcbbb - here->BSIM4gbbs) already added to BPbpPtr */
(*(here->BSIM4SBspPtr) += m * (gcsbsb - here->BSIM4gbs));
(*(here->BSIM4SBbpPtr) -= m * here->BSIM4grbps);
(*(here->BSIM4SBbPtr) -= m * here->BSIM4grbsb);
(*(here->BSIM4SBsbPtr) += m * (here->BSIM4gbs - gcsbsb + here->BSIM4grbps + here->BSIM4grbsb));
(*(here->BSIM4BdbPtr) -= m * here->BSIM4grbdb);
(*(here->BSIM4BbpPtr) -= m * here->BSIM4grbpb);
(*(here->BSIM4BsbPtr) -= m * here->BSIM4grbsb);
(*(here->BSIM4BbPtr) += m * (here->BSIM4grbsb + here->BSIM4grbdb + here->BSIM4grbpb));
}

/* Transient NQS (non-quasi-static) mode: stamp the charge-deficit (q) node
   couplings into the matrix. */
if (here->BSIM4trnqsMod)
{
    (*(here->BSIM4QqPtr) += m * (gqdef + here->BSIM4gtau));
    (*(here->BSIM4QgpPtr) += m * (ggtg - gcqgb));
    (*(here->BSIM4QdpPtr) += m * (ggtd - gcqdb));
    (*(here->BSIM4QspPtr) += m * (ggts - gcqsb));
    (*(here->BSIM4QbpPtr) += m * (ggtb - gcqbb));
    (*(here->BSIM4DPqPtr) += m * dxpart * here->BSIM4gtau);
    (*(here->BSIM4SPqPtr) += m * sxpart * here->BSIM4gtau);
    (*(here->BSIM4GPqPtr) -= m * here->BSIM4gtau);
}
#endif

line1000:  ;

#ifndef USE_OMP
}  /* End of MOSFET Instance */
}  /* End of Model Instance */
#endif

return(OK);
}

/* function to compute poly depletion effect */
/* BSIM4polyDepletion:
 *   Compute the effective gate voltage after polysilicon-gate depletion.
 *
 *   phi           - (presumably) surface potential in volts -- TODO confirm
 *   ngate         - poly gate doping; the effect is only applied for
 *                   1.0e18 < ngate < 1.0e25 (see guard below)
 *   epsgate       - gate permittivity; 0 disables the effect entirely
 *   coxe          - oxide capacitance term (appears squared in T1)
 *   Vgs           - applied gate-source voltage; effect applies only for Vgs > phi
 *   Vgs_eff       - out: effective (depletion-reduced) Vgs
 *   dVgs_eff_dVg  - out: derivative of *Vgs_eff with respect to Vgs
 *
 *   Always returns 0. When the guard condition fails, Vgs is passed through
 *   unchanged with unit derivative.
 */
int BSIM4polyDepletion(
    double phi, double ngate, double epsgate, double coxe, double Vgs,
    double *Vgs_eff, double *dVgs_eff_dVg)
{
    double T1, T2, T3, T4, T5, T6, T7, T8;

    /* Poly Gate Si Depletion Effect */
    if ((ngate > 1.0e18) && (ngate < 1.0e25) && (Vgs > phi) && (epsgate!=0) ){
        /* NOTE(review): CHARGE is presumably the elementary-charge constant
           from the simulator's constants header -- confirm. */
        T1 = 1.0e6 * CHARGE * epsgate * ngate / (coxe * coxe);
        T8 = Vgs - phi;
        T4 = sqrt(1.0 + 2.0 * T8 / T1);
        T2 = 2.0 * T8 / (T4 + 1.0);
        T3 = 0.5 * T2 * T2 / T1; /* T3 = Vpoly */
        T7 = 1.12 - T3 - 0.05;
        T6 = sqrt(T7 * T7 + 0.224);
        T5 = 1.12 - 0.5 * (T7 + T6);
        *Vgs_eff = Vgs - T5;
        *dVgs_eff_dVg = 1.0 - (0.5 - 0.5 / T4) * (1.0 + T7 / T6);
    }
    else {
        /* Effect disabled: pass the applied voltage through unchanged. */
        *Vgs_eff = Vgs;
        *dVgs_eff_dVg = 1.0;
    }
    return(0);
}

#ifdef USE_OMP
/* BSIM4LoadRhsMat: serial pass that folds the per-instance RHS and matrix
   contributions (stored in numbered instance slots by the parallel load
   pass) into the shared CKTrhs vector and circuit matrix. */
void BSIM4LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt)
{
    int InstCount, idx;
BSIM4instance **InstArray;
BSIM4instance *here;
BSIM4model *model = (BSIM4model*)inModel;

InstArray = model->BSIM4InstanceArray;
InstCount = model->BSIM4InstCount;

/* Serial accumulation loop: every instance's privately-stored contributions
   (filled in by the parallel load pass, slots BSIM4_1 .. BSIM4_103) are
   folded into the shared right-hand side and sparse matrix here, avoiding
   concurrent writes to shared memory. */
for(idx = 0; idx < InstCount; idx++)
{
    here = InstArray[idx];
    model = BSIM4modPtr(here);

    /* Update b for Ax = b */
    (*(ckt->CKTrhs + here->BSIM4dNodePrime) += here->BSIM4rhsdPrime);
    (*(ckt->CKTrhs + here->BSIM4gNodePrime) -= here->BSIM4rhsgPrime);

    if (here->BSIM4rgateMod == 2)
        (*(ckt->CKTrhs + here->BSIM4gNodeExt) -= here->BSIM4rhsgExt);
    else if (here->BSIM4rgateMod == 3)
        (*(ckt->CKTrhs + here->BSIM4gNodeMid) -= here->BSIM4grhsMid);

    if (!here->BSIM4rbodyMod)
    {
        (*(ckt->CKTrhs + here->BSIM4bNodePrime) += here->BSIM4rhsbPrime);
        (*(ckt->CKTrhs + here->BSIM4sNodePrime) += here->BSIM4rhssPrime);
    }
    else
    {
        (*(ckt->CKTrhs + here->BSIM4dbNode) -= here->BSIM4rhsdb);
        (*(ckt->CKTrhs + here->BSIM4bNodePrime) += here->BSIM4rhsbPrime);
        (*(ckt->CKTrhs + here->BSIM4sbNode) -= here->BSIM4rhssb);
        (*(ckt->CKTrhs + here->BSIM4sNodePrime) += here->BSIM4rhssPrime);
    }

    if (model->BSIM4rdsMod)
    {
        (*(ckt->CKTrhs + here->BSIM4dNode) -= here->BSIM4rhsd);
        (*(ckt->CKTrhs + here->BSIM4sNode) += here->BSIM4rhss);
    }

    if (here->BSIM4trnqsMod)
        *(ckt->CKTrhs + here->BSIM4qNode) += here->BSIM4rhsq;

    /* Update A for Ax = b */
    /* Gate-resistance network stamps; the numbered slots are consumed in
       exactly the order (and with the same signs) they were produced. */
    if (here->BSIM4rgateMod == 1)
    {
        (*(here->BSIM4GEgePtr) += here->BSIM4_1);
        (*(here->BSIM4GPgePtr) -= here->BSIM4_2);
        (*(here->BSIM4GEgpPtr) -= here->BSIM4_3);
        (*(here->BSIM4GPgpPtr) += here->BSIM4_4);
        (*(here->BSIM4GPdpPtr) += here->BSIM4_5);
        (*(here->BSIM4GPspPtr) += here->BSIM4_6);
        (*(here->BSIM4GPbpPtr) += here->BSIM4_7);
    }
    else if (here->BSIM4rgateMod == 2)
    {
        (*(here->BSIM4GEgePtr) += here->BSIM4_8);
        (*(here->BSIM4GEgpPtr) += here->BSIM4_9);
        (*(here->BSIM4GEdpPtr) += here->BSIM4_10);
        (*(here->BSIM4GEspPtr) += here->BSIM4_11);
        (*(here->BSIM4GEbpPtr) += here->BSIM4_12);
        (*(here->BSIM4GPgePtr) -= here->BSIM4_13);
        (*(here->BSIM4GPgpPtr) += here->BSIM4_14);
        (*(here->BSIM4GPdpPtr) += here->BSIM4_15);
        (*(here->BSIM4GPspPtr) += here->BSIM4_16);
        (*(here->BSIM4GPbpPtr) += here->BSIM4_17);
    }
    else if (here->BSIM4rgateMod == 3)
    {
        (*(here->BSIM4GEgePtr) += here->BSIM4_18);
        (*(here->BSIM4GEgmPtr) -= here->BSIM4_19);
        (*(here->BSIM4GMgePtr) -= here->BSIM4_20);
        (*(here->BSIM4GMgmPtr) += here->BSIM4_21);
        (*(here->BSIM4GMdpPtr) += here->BSIM4_22);
        (*(here->BSIM4GMgpPtr) += here->BSIM4_23);
        (*(here->BSIM4GMspPtr) += here->BSIM4_24);
        (*(here->BSIM4GMbpPtr) += here->BSIM4_25);
        (*(here->BSIM4DPgmPtr) += here->BSIM4_26);
        (*(here->BSIM4GPgmPtr) -= here->BSIM4_27);
        (*(here->BSIM4SPgmPtr) += here->BSIM4_28);
        (*(here->BSIM4BPgmPtr) += here->BSIM4_29);
        (*(here->BSIM4GPgpPtr) += here->BSIM4_30);
        (*(here->BSIM4GPdpPtr) += here->BSIM4_31);
        (*(here->BSIM4GPspPtr) += here->BSIM4_32);
        (*(here->BSIM4GPbpPtr) += here->BSIM4_33);
    }
    else
    {
        (*(here->BSIM4GPgpPtr) += here->BSIM4_34);
        (*(here->BSIM4GPdpPtr) += here->BSIM4_35);
        (*(here->BSIM4GPspPtr) += here->BSIM4_36);
        (*(here->BSIM4GPbpPtr) += here->BSIM4_37);
    }

    /* Bias-dependent drain/source series-resistance stamps. */
    if (model->BSIM4rdsMod)
    {
        (*(here->BSIM4DgpPtr) += here->BSIM4_38);
        (*(here->BSIM4DspPtr) += here->BSIM4_39);
        (*(here->BSIM4DbpPtr) += here->BSIM4_40);
        (*(here->BSIM4SdpPtr) += here->BSIM4_41);
        (*(here->BSIM4SgpPtr) += here->BSIM4_42);
        (*(here->BSIM4SbpPtr) += here->BSIM4_43);
    }

    /* Core drain/gate/source/bulk stamps. */
    (*(here->BSIM4DPdpPtr) += here->BSIM4_44);
    (*(here->BSIM4DPdPtr) -= here->BSIM4_45);
    (*(here->BSIM4DPgpPtr) += here->BSIM4_46);
    (*(here->BSIM4DPspPtr) -= here->BSIM4_47);
    (*(here->BSIM4DPbpPtr) -= here->BSIM4_48);
    (*(here->BSIM4DdpPtr) -= here->BSIM4_49);
    (*(here->BSIM4DdPtr) += here->BSIM4_50);
    (*(here->BSIM4SPdpPtr) -= here->BSIM4_51);
    (*(here->BSIM4SPgpPtr) += here->BSIM4_52);
    (*(here->BSIM4SPspPtr) += here->BSIM4_53);
    (*(here->BSIM4SPsPtr) -= here->BSIM4_54);
    (*(here->BSIM4SPbpPtr) -= here->BSIM4_55);
    (*(here->BSIM4SspPtr) -= here->BSIM4_56);
    (*(here->BSIM4SsPtr) += here->BSIM4_57);
    (*(here->BSIM4BPdpPtr) += here->BSIM4_58);
    (*(here->BSIM4BPgpPtr) += here->BSIM4_59);
    (*(here->BSIM4BPspPtr) += here->BSIM4_60);
    (*(here->BSIM4BPbpPtr) += here->BSIM4_61);

    /* stamp gidl */
    (*(here->BSIM4DPdpPtr) += here->BSIM4_62);
    (*(here->BSIM4DPgpPtr) += here->BSIM4_63);
    (*(here->BSIM4DPspPtr) -= here->BSIM4_64);
    (*(here->BSIM4DPbpPtr) += here->BSIM4_65);
    (*(here->BSIM4BPdpPtr) -= here->BSIM4_66);
    (*(here->BSIM4BPgpPtr) -= here->BSIM4_67);
    (*(here->BSIM4BPspPtr) += here->BSIM4_68);
    (*(here->BSIM4BPbpPtr) -= here->BSIM4_69);

    /* stamp gisl */
    (*(here->BSIM4SPdpPtr) -= here->BSIM4_70);
    (*(here->BSIM4SPgpPtr) += here->BSIM4_71);
    (*(here->BSIM4SPspPtr) += here->BSIM4_72);
    (*(here->BSIM4SPbpPtr) += here->BSIM4_73);
    (*(here->BSIM4BPdpPtr) += here->BSIM4_74);
    (*(here->BSIM4BPgpPtr) -= here->BSIM4_75);
    (*(here->BSIM4BPspPtr) -= here->BSIM4_76);
    (*(here->BSIM4BPbpPtr) -= here->BSIM4_77);

    /* Substrate-resistance network stamps. */
    if (here->BSIM4rbodyMod)
    {
        (*(here->BSIM4DPdbPtr) += here->BSIM4_78);
        (*(here->BSIM4SPsbPtr) -= here->BSIM4_79);
        (*(here->BSIM4DBdpPtr) += here->BSIM4_80);
        (*(here->BSIM4DBdbPtr) += here->BSIM4_81);
        (*(here->BSIM4DBbpPtr) -= here->BSIM4_82);
        (*(here->BSIM4DBbPtr) -= here->BSIM4_83);
        (*(here->BSIM4BPdbPtr) -= here->BSIM4_84);
        (*(here->BSIM4BPbPtr) -= here->BSIM4_85);
        (*(here->BSIM4BPsbPtr) -= here->BSIM4_86);
        (*(here->BSIM4BPbpPtr) += here->BSIM4_87);
        (*(here->BSIM4SBspPtr) += here->BSIM4_88);
        (*(here->BSIM4SBbpPtr) -= here->BSIM4_89);
        (*(here->BSIM4SBbPtr) -= here->BSIM4_90);
        (*(here->BSIM4SBsbPtr) += here->BSIM4_91);
        (*(here->BSIM4BdbPtr) -= here->BSIM4_92);
        (*(here->BSIM4BbpPtr) -= here->BSIM4_93);
        (*(here->BSIM4BsbPtr) -= here->BSIM4_94);
        (*(here->BSIM4BbPtr) += here->BSIM4_95);
    }

    /* NQS charge-deficit node stamps. */
    if (here->BSIM4trnqsMod)
    {
        (*(here->BSIM4QqPtr) += here->BSIM4_96);
        (*(here->BSIM4QgpPtr) += here->BSIM4_97);
        (*(here->BSIM4QdpPtr) += here->BSIM4_98);
        (*(here->BSIM4QspPtr) += here->BSIM4_99);
        (*(here->BSIM4QbpPtr) += here->BSIM4_100);
        (*(here->BSIM4DPqPtr) += here->BSIM4_101);
        (*(here->BSIM4SPqPtr) += here->BSIM4_102);
        (*(here->BSIM4GPqPtr) -= here->BSIM4_103);
    }
}
}
#endif
sphere.h
#ifndef batoid_sphere_h
#define batoid_sphere_h

#include "surface.h"

namespace batoid {

#if defined(BATOID_GPU)
    // Compile this class for the GPU target as well when offloading is enabled.
    #pragma omp declare target
#endif

    // Spherical optical surface of radius of curvature R, defined as the sag
    // (z-height) of a sphere relative to its vertex tangent plane.
    // Derives from the abstract Surface interface declared in surface.h.
    class Sphere : public Surface {
    public:
        // R: radius of curvature; sign convention follows the rest of the
        // Surface hierarchy (defined in surface.h / implementation file).
        Sphere(double R);
        ~Sphere();

        // Return a device-side copy of this surface (GPU offload support).
        virtual const Surface* getDevPtr() const override;

        // Surface height z at transverse coordinates (x, y).
        virtual double sag(double, double) const override;

        // Outward surface normal (nx, ny, nz) at (x, y), written to the
        // output reference parameters.
        virtual void normal(
            double x, double y,
            double& nx, double& ny, double& nz
        ) const override;

        // Compute the time dt until a ray at (x, y, z) with velocity
        // (vx, vy, vz) intersects the surface; returns false if there is
        // no intersection.
        virtual bool timeToIntersect(
            double x, double y, double z,
            double vx, double vy, double vz,
            double& dt
        ) const override;

    private:
        const double _R;      // Radius of curvature
        const double _Rsq;    // R*R, cached
        const double _Rinv;   // 1/R, cached
        const double _Rinvsq; // 1/R/R, cached

        // Radial derivative dz/dr of the sag at radius r (helper for normal()).
        double _dzdr(double r) const;
    };

#if defined(BATOID_GPU)
    #pragma omp end declare target
#endif

}
#endif
convolutiondepthwise_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1.
// One input channel maps to one output channel ("group" == channel count);
// each group uses its own 9 kernel weights and optional per-group bias.
// NOTE(review): despite the "_sse" suffix, this is plain scalar code — the
// name presumably mirrors the SIMD variants elsewhere in ncnn; confirm.
// Assumes the caller has already sized top_blob for a valid (pad-adjusted)
// 3x3/stride-1 output — TODO confirm against the calling layer.
static void convdw3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Each group/channel is independent, so parallelize across groups.
    #pragma omp parallel for
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        // Bias is optional; a null bias pointer means zero bias.
        const float bias0 = bias ? bias[g] : 0.f;

        // 9 weights per group (3x3 kernel).
        const float* kernel0 = kernel + g*9;

        // Two output rows are produced per outer iteration.
        float* outptr = out;
        float* outptr2 = outptr + outw;

        const float* img0 = bottom_blob.channel(g);

        // Four consecutive input rows feed two output rows (rows overlap).
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;
        const float* r3 = img0 + w*3;

        // Kernel rows.
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;

        // Main loop: compute two output rows at a time to reuse input rows
        // r1 and r2 across both accumulations.
        for (; i+1 < outh; i+=2)
        {
            int remain = outw;

            for (; remain>0; remain--)
            {
                // Output row i: rows r0..r2 against kernel rows k0..k2.
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                // Output row i+1: rows r1..r3 against kernel rows k0..k2.
                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;

                // Stride 1: advance every pointer by one column.
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            // Skip the 2-column right margin plus one full row: the next
            // iteration starts two input rows further down.
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            // outptr already walked one row; skip the second row written
            // via outptr2.
            outptr += outw;
            outptr2 += outw;
        }

        // Tail: odd remaining output row handled one row at a time.
        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain>0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            // Skip the 2-column right margin to reach the next input row.
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

// Depthwise 3x3 convolution, stride 2.
// Same grouping/bias conventions as convdw3x3s1_sse above, but input
// pointers advance two columns per output and skip a full extra input row
// between output rows (tailstep).
static void convdw3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // After walking a row (2*outw columns consumed), jump the remainder of
    // the current input row plus one whole row (stride 2 vertically).
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Channels are independent; parallelize across groups.
    #pragma omp parallel for
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g*9;

        float* outptr = out;

        const float* img0 = bottom_blob.channel(g);

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;

        for (; i < outh; i++)
        {
            int remain = outw;

            for (; remain>0; remain--)
            {
                // 3x3 window against kernel rows k0..k2.
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                // Stride 2 horizontally.
                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            // Stride 2 vertically (see tailstep above).
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
main.c
#include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #include <time.h> #include <omp.h> double interval(struct timespec start, struct timespec end) { struct timespec temp; temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; if (temp.tv_nsec < 0) { temp.tv_sec = temp.tv_sec - 1; temp.tv_nsec = temp.tv_nsec + 1000000000; } return (((double)temp.tv_sec) + ((double)temp.tv_nsec)*1.0e-9); } /* This method does not require adjusting a #define constant How to use this method: struct timespec time_start, time_stop; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_start); // DO SOMETHING THAT TAKES TIME clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_stop); measurement = interval(time_start, time_stop); */ #define PLATEAU 0 typedef unsigned char image_t, *image_ptr_t; typedef int img_t, *img_ptr_t; img_ptr_t convert2data(image_ptr_t image, int width, int height); image_ptr_t convert2image(img_ptr_t image, int width, int height); void steepest_descent_kernel(img_ptr_t in, img_ptr_t *out, int width, int height); void border_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height); void minima_basin_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height); void watershed_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height); int main(int argc, char **argv); int main(int argc, char **argv) { int width, height, channels; image_ptr_t data = stbi_load(argv[1], &width, &height, &channels, 1); img_ptr_t input = convert2data(data, width, height); stbi_image_free(data); img_ptr_t lowest_descent = NULL; img_ptr_t border = NULL; img_ptr_t minima = NULL; img_ptr_t watershed = NULL; struct timespec time_start, time_stop; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_start); // start timer steepest_descent_kernel(input, &lowest_descent, width, height); 
border_kernel(input, lowest_descent, &border, width, height); minima_basin_kernel(input, border, &minima, width, height); watershed_kernel(input, minima, &watershed, width, height); clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_stop); printf("%f\n", interval(time_start, time_stop)); stbi_write_png("1_lowest_descent_result.png", width, height, channels, convert2image(lowest_descent, width, height), width * channels); stbi_write_png("2_border_result.png", width, height, channels, convert2image(border, width, height), width * channels); stbi_write_png("3_minima_basin_result.png", width, height, channels, convert2image(minima, width, height), width * channels); stbi_write_png("4_watershed_result.png", width, height, channels, convert2image(watershed, width, height), width * channels); free(watershed); free(lowest_descent); free(border); free(input); return 0; } img_ptr_t convert2data(image_ptr_t image, int width, int height) { img_ptr_t temp = (img_ptr_t)calloc(width * height, sizeof(img_t)); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { temp[i * width + j] = (img_t)image[i * width + j]; } } return temp; } image_ptr_t convert2image(img_ptr_t image, int width, int height) { // Step 1: find min and max values from the image img_t max = INT_MIN, min = INT_MAX; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { img_t current_pixel = image[i * width + j]; if (current_pixel < min) min = current_pixel; if (current_pixel > max) max = current_pixel; } } // printf("min: %i\n", min); // printf("max: %i\n", max); // create a new image with the values scaled from [0-255] image_ptr_t temp = (image_ptr_t)calloc(width * height, sizeof(image_t)); float max_min = max-min; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { img_t pix_val = image[i * width + j]; float val = (pix_val - min) / (max_min); temp[i * width + j] = (image_t)(val * 255); } } return temp; } void steepest_descent_kernel(img_ptr_t in, img_ptr_t *out, int 
width, int height) { img_ptr_t _lowest = (img_ptr_t)calloc(width * height, sizeof(img_t)); if (_lowest == NULL) { perror("Failed to allocate memory!\n"); exit(EXIT_FAILURE); } #pragma omp parallel for for (int i = 1; i < height - 1; i++) { for (int j = 1; j < width - 1; j++) { // find minimum in neighbors img_t min = (img_t)INFINITY; if (min > in[i * width + (j + 1)]) min = in[i * width + (j + 1)]; if (min > in[i * width + (j - 1)]) min = in[i * width + (j - 1)]; if (min > in[(i + 1) * width + j]) min = in[(i + 1) * width + j]; if (min > in[(i - 1) * width + j]) min = in[(i - 1) * width + j]; if (min > in[(i - 1) * width + (j + 1)]) min = in[(i - 1) * width + (j + 1)]; if (min > in[(i - 1) * width + (j - 1)]) min = in[(i - 1) * width + (j - 1)]; if (min > in[(i + 1) * width + (j + 1)]) min = in[(i + 1) * width + (j + 1)]; if (min > in[(i + 1) * width + (j - 1)]) min = in[(i + 1) * width + (j - 1)]; // check if we have plateaued bool exists_q = false; img_t p = in[i * width + j]; if (p > in[i * width + (j + 1)] && in[i * width + (j + 1)] == min) { _lowest[i * width + j] = -(i * width + (j + 1)); exists_q = true; goto FOUND_LOWEST_DESCENT; } if (p > in[i * width + (j - 1)] && in[i * width + (j - 1)] == min) { _lowest[i * width + j] = -(i * width + (j - 1)); exists_q = true; goto FOUND_LOWEST_DESCENT; } if (p > in[(i + 1) * width + j] && in[(i + 1) * width + j] == min) { _lowest[i * width + j] = -((i - 1) * width + j); exists_q = true; goto FOUND_LOWEST_DESCENT; } if (p > in[(i - 1) * width + j] && in[(i - 1) * width + j] == min) { _lowest[i * width + j] = -((i - 1) * width + j); exists_q = true; goto FOUND_LOWEST_DESCENT; } if (p > in[(i - 1) * width + (j + 1)] && in[(i - 1) * width + (j + 1)] == min) { _lowest[i * width + j] = -((i - 1) * width + (j + 1)); exists_q = true; goto FOUND_LOWEST_DESCENT; } if (p > in[(i - 1) * width + (j - 1)] && in[(i - 1) * width + (j - 1)] == min) { _lowest[i * width + j] = -((i - 1) * width + (j - 1)); exists_q = true; goto 
FOUND_LOWEST_DESCENT; } if (p > in[(i + 1) * width + (j + 1)] && in[(i + 1) * width + (j + 1)] == min) { _lowest[i * width + j] = -((i + 1) * width + (j + 1)); exists_q = true; goto FOUND_LOWEST_DESCENT; } if (p > in[(i + 1) * width + (j - 1)] && in[(i + 1) * width + (j - 1)] == min) { _lowest[i * width + j] = -((i + 1) * width + (j - 1)); exists_q = true; goto FOUND_LOWEST_DESCENT; } FOUND_LOWEST_DESCENT: if (!exists_q) { _lowest[i * width + j] = (img_t)PLATEAU; } } } *out = _lowest; } void border_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height) { img_ptr_t _border = (img_ptr_t)calloc(width * height, sizeof(img_t)); if (in == NULL) { perror("Failed to allocate memory!\n"); exit(EXIT_FAILURE); } bool stable = false; img_ptr_t temp_border = (img_ptr_t)calloc(width * height, sizeof(img_t)); if (temp_border == NULL) { perror("Failed to allocate memory!\n"); exit(EXIT_FAILURE); }; while (!stable) { stable = true; memcpy(temp_border, _border, width * height * sizeof(img_t)); #pragma omp parallel for for (int i = 1; i < height - 1; i++) { for (int j = 1; j < width - 1; j++) { if (in[i * width + j] == (img_t)PLATEAU) { if (in[i * width + (j + 1)] < 0 && image[i * width + (j + 1)] == image[i * width + j]) { if (temp_border[i * width + j] != -(i * width + (j + 1))) stable = false; temp_border[i * width + j] = -(i * width + (j + 1)); break; } if (in[i * width + (j - 1)] < 0 && image[i * width + (j - 1)] == image[i * width + j]) { if (temp_border[i * width + j] != -(i * width + (j - 1))) stable = false; temp_border[i * width + j] = -(i * width + (j - 1)); break; } if (in[(i + 1) * width + (j + 1)] < 0 && image[(i + 1) * width + (j + 1)] == image[i * width + j]) { if (temp_border[i * width + j] != -((i + 1) * width + (j + 1))) stable = false; temp_border[i * width + j] = -((i + 1) * width + (j + 1)); break; } if (in[(i + 1) * width + (j - 1)] < 0 && image[(i + 1) * width + (j - 1)] == image[i * width + j]) { if (temp_border[i * width + j] != -((i + 
1) * width + (j - 1))) stable = false; temp_border[i * width + j] = -((i + 1) * width + (j - 1)); break; } if (in[(i - 1) * width + (j + 1)] < 0 && image[(i - 1) * width + (j + 1)] == image[i * width + j]) { if (temp_border[i * width + j] != -((i - 1) * width + (j + 1))) stable = false; temp_border[i * width + j] = -((i - 1) * width + (j + 1)); break; } if (in[(i - 1) * width + (j - 1)] < 0 && image[(i - 1) * width + (j - 1)] == image[i * width + j]) { if (temp_border[i * width + j] != -((i - 1) * width + (j - 1))) stable = false; temp_border[i * width + j] = -((i - 1) * width + (j - 1)); break; } if (in[(i + 1) * width + j] < 0 && image[(i + 1) * width + j] == image[i * width + j]) { if (temp_border[i * width + j] != -((i + 1) * width + j)) stable = false; temp_border[i * width + j] = -((i + 1) * width + j); break; } if (in[(i - 1) * width + j] < 0 && image[(i - 1) * width + j] == image[i * width + j]) { if (temp_border[i * width + j] != -((i - 1) * width + j)) stable = false; temp_border[i * width + j] = -((i - 1) * width + j); break; } } } } memcpy(_border, temp_border, width * height * sizeof(img_t)); } #pragma omp parallel for for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (in[i * width + j] == (img_t)PLATEAU) { _border[i * width + j] = -(i * width + j); } } } *out = _border; } void minima_basin_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height) { img_ptr_t _minima = (img_ptr_t)calloc(width * height, sizeof(img_t)); if (_minima == NULL) { perror("Failed to allocate memory!\n"); exit(EXIT_FAILURE); } memcpy(_minima, in, height * width * sizeof(img_t)); bool stable = false; while (!stable) { stable = true; #pragma omp parallel for for (int i = 1; i < height - 1; i++) { for (int j = 1; j < width - 1; j++) { if (_minima[i * width + j] > (img_t)PLATEAU) { img_t label = (img_t)INFINITY; if (_minima[i * width + (j + 1)] < label && image[i * width + (j + 1)] == image[i * width + j]) { label = _minima[i * width + (j + 
1)]; } if (_minima[i * width + (j - 1)] < label && image[i * width + (j - 1)] == image[i * width + j]) { label = _minima[i * width + (j - 1)]; } if (_minima[(i + 1) * width + (j + 1)] < label && image[(i + 1) * width + (j + 1)] == image[i * width + j]) { label = _minima[(i + 1) * width + (j + 1)]; } if (_minima[(i + 1) * width + (j - 1)] < label && image[(i + 1) * width + (j - 1)] == image[i * width + j]) { label = _minima[(i + 1) * width + (j - 1)]; } if (_minima[(i - 1) * width + (j + 1)] < label && image[(i - 1) * width + (j + 1)] == image[i * width + j]) { label = _minima[(i - 1) * width + (j + 1)]; } if (_minima[(i - 1) * width + (j - 1)] < label && image[(i - 1) * width + (j - 1)] == image[i * width + j]) { label = _minima[(i - 1) * width + (j - 1)]; } if (_minima[(i + 1) * width + j] < label && image[(i + 1) * width + j] == image[i * width + j]) { label = _minima[(i + 1) * width + j]; } if (_minima[(i - 1) * width + j] < label && image[(i - 1) * width + j] == image[i * width + j]) { label = _minima[(i - 1) * width + j]; } if (label < _minima[i * width + j]) { if (_minima[_minima[i * width + j]] != label) { stable = false; } _minima[_minima[i * width + j]] = label; } } } } #pragma omp parallel for for (int i = 1; i < height - 1; i++) { for (int j = 1; j < width - 1; j++) { if (_minima[i * width + j] > (img_t)PLATEAU) { img_t label = _minima[i * width + j]; img_t ref = (img_t)INFINITY; while (label != ref) { ref = label; label = _minima[ref]; } if (label != ref) { stable = false; } _minima[i * width + j] = label; } } } } *out = _minima; } void watershed_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height) { img_ptr_t _watershed = (img_ptr_t)calloc(height * width, sizeof(img_t)); if (_watershed == NULL) { perror("Failed to allocate memory!\n"); exit(EXIT_FAILURE); } memcpy(_watershed, in, height * width * sizeof(img_t)); #pragma omp parallel for for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { _watershed[i * width + 
j] = abs(_watershed[i * width + j]); } } #pragma omp parallel for for (int i = 1; i < height - 1; i++) { for (int j = 1; j < width - 1; j++) { img_t label = _watershed[i * width + j]; if (label != (i * width + j)) { img_t ref = (img_t)INFINITY; while (ref != label) { ref = label; label = _watershed[ref]; } _watershed[i * width + j] = label; } } } *out = _watershed; }
opencl_DES_bs_h_plug.c
/*
 * This software is Copyright (c) 2012-2015 Sayantan Datta <std2048 at gmail dot com>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification, are permitted.
 * Based on Solar Designer implementation of DES_bs_b.c in jtr-v1.7.9
 */

#ifdef HAVE_OPENCL

#include <assert.h>
#include <string.h>
#include <sys/time.h>

#include "options.h"
#include "opencl_DES_bs.h"
#include "opencl_DES_hst_dev_shared.h"
#include "mask_ext.h"

/* Extra work-items padded onto every per-keys buffer allocation. */
#define PADDING 2048

/* On-disk locations for the per-device LWS config and per-salt kernel binaries. */
#define CONFIG_FILE "$JOHN/kernels/DES_bs_kernel_h_%s.config"
#define BINARY_FILE "$JOHN/kernels/DES_bs_kernel_h_"Zu"_%s_%d.bin"

/* One compiled (salt-specialized) kernel per device per 12-bit salt value. */
static cl_kernel **kernels;
static cl_mem buffer_map, buffer_bs_keys, buffer_unchecked_hashes;
/* marked_salts[s] == s means the kernel for salt s is built; 0x7fffffff = not built. */
static WORD *marked_salts = NULL, current_salt = 0;
static unsigned int *processed_salts = NULL;
static int mask_mode = 0;

#include "memdbg.h"

static int des_crypt_25(int *pcount, struct db_salt *salt);

/*
 * Allocate the per-keys-per-crypt device buffers for `gws` work-items:
 * the bitsliced key buffer (56 slices) and the raw hash output buffer
 * (64 slices), both scaled by the mask iteration count and padded.
 */
static void create_clobj_kpc(size_t gws)
{
	unsigned int iter_count = (mask_int_cand.num_int_cand + DES_BS_DEPTH - 1) >> DES_LOG_DEPTH;

	create_keys_buffer(gws, PADDING);

	buffer_bs_keys = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, (gws * iter_count + PADDING) * sizeof(DES_bs_vector) * 56, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Create buffer_bs_keys failed.\n");

	buffer_unchecked_hashes = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, (gws * iter_count + PADDING) * sizeof(DES_bs_vector) * 64, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Create buffer_unchecked_hashes failed.\n");

	/* Presumably because gids are packed into 27 bits elsewhere — TODO confirm
	 * (cf. the 27/5-bit hash_ids packing mentioned in gws_tune below). */
	assert(gws * iter_count <= ((0x1 << 27) - 1));
}

/*
 * Release the buffers created by create_clobj_kpc(); buffer_bs_keys
 * doubles as the "allocated" flag and is reset to 0 afterwards.
 */
static void release_clobj_kpc()
{
	if (buffer_bs_keys != (cl_mem)0) {
		release_keys_buffer();
		HANDLE_CLERROR(clReleaseMemObject(buffer_bs_keys), "Release buffer_bs_keys failed.\n");
		HANDLE_CLERROR(clReleaseMemObject(buffer_unchecked_hashes), "Release buffer_unchecked_hashes failed.\n");
		buffer_bs_keys = (cl_mem)0;
	}
}

/*
 * One-time object setup: salt bookkeeping, the E-box index map buffer,
 * the int-keys (mask) buffer, and the hash-comparison tables for `db`.
 */
static void create_clobj(struct db_main *db)
{
	int i;

	marked_salts = (WORD *) mem_alloc(4096 * sizeof(WORD));

	/* 0x7fffffff marks "no kernel built for this salt yet". */
	for (i = 0; i < 4096; i++)
		marked_salts[i] = 0x7fffffff;

	opencl_DES_bs_init_index();

	buffer_map = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, 768 * sizeof(unsigned int), opencl_DES_bs_index768, &ret_code);
	HANDLE_CLERROR(ret_code, "Create buffer_map.\n");

	create_int_keys_buffer();
	build_tables(db);
}

/*
 * Tear down everything create_clobj() made, plus every per-salt kernel;
 * buffer_map doubles as the "initialized" flag.
 */
static void release_clobj()
{
	int i;

	if (buffer_map) {
		MEM_FREE(marked_salts);
		HANDLE_CLERROR(clReleaseMemObject(buffer_map), "Release buffer_map failed.\n");
		release_tables();
		release_int_keys_buffer();
		buffer_map = 0;
	}

	for (i = 0; i < 4096; i++)
		if (kernels[gpu_id][i]) {
			HANDLE_CLERROR(clReleaseKernel(kernels[gpu_id][i]), "Release kernel(crypt(i)) failed.\n");
			kernels[gpu_id][i] = 0;
		}
}

/*
 * Format-shutdown cleanup: release all device objects, the program,
 * and the host-side bookkeeping arrays.
 * NOTE(review): the per-salt kernel loop here is redundant — release_clobj()
 * above has already released and zeroed kernels[gpu_id][i].
 */
static void clean_all_buffers()
{
	int i;

	release_clobj();
	release_clobj_kpc();

	for( i = 0; i < 4096; i++)
		if (kernels[gpu_id][i])
			HANDLE_CLERROR(clReleaseKernel(kernels[gpu_id][i]), "Error releasing kernel");

	if (program[gpu_id]) {
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Error releasing Program");
		program[gpu_id] = NULL;
	}

	for (i = 0; i < MAX_GPU_DEVICES; i++)
		MEM_FREE(kernels[i]);
	MEM_FREE(kernels);

	MEM_FREE(processed_salts);

	finish_checking();
}

/* First call must use salt = 0, to initialize processed_salts.
*/ static void build_salt(WORD salt) { WORD new; static WORD old = 0xffffff; int dst; new = salt; for (dst = 0; dst < 24; dst++) { if ((new ^ old) & 1) { DES_bs_vector sp1, sp2; int src1 = dst; int src2 = dst + 24; if (new & 1) { src1 = src2; src2 = dst; } sp1 = opencl_DES_E[src1]; sp2 = opencl_DES_E[src2]; processed_salts[4096 * 96 + dst] = sp1; processed_salts[4096 * 96 + dst + 24] = sp2; processed_salts[4096 * 96 + dst + 48] = sp1 + 32; processed_salts[4096 * 96 + dst + 72] = sp2 + 32; } new >>= 1; old >>= 1; if (new == old) break; } old = salt; memcpy(&processed_salts[salt * 96], &processed_salts[4096 * 96], 96 * sizeof(unsigned int)); } static void init_global_variables() { int i; processed_salts = (unsigned int *) mem_calloc(4097, 96 * sizeof(unsigned int)); kernels = (cl_kernel **) mem_calloc(MAX_GPU_DEVICES, sizeof(cl_kernel *)); for (i = 0; i < MAX_GPU_DEVICES; i++) kernels[i] = (cl_kernel *) mem_calloc(4096, sizeof(cl_kernel)); init_checking(); mask_int_cand_target = opencl_speed_index(gpu_id) / 3000; } static char* enc_salt(WORD salt_val) { unsigned int index[48] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83}; char *build_opts; unsigned int i, j; build_opts = (char *)mem_calloc(1000, sizeof(char)); for (i = 0, j = 0; i < 48; i++) { sprintf(build_opts + j, "-D index%u=%u ", index[i], processed_salts[salt_val * 96 + index[i]]); j = strlen(build_opts); } return build_opts; } static void set_salt(void *salt) { current_salt = *(WORD *)salt; } static void modify_build_save_restore(WORD salt_val, int id_gpu, int save_binary, int force_build, size_t lws, cl_program *program_ptr) { char kernel_bin_name[200]; char *kernel_source = NULL; char *d_name, *full_path; FILE *file; sprintf(kernel_bin_name, BINARY_FILE, lws, d_name = get_device_name(id_gpu), salt_val); MEM_FREE(d_name); file = fopen(full_path = 
path_expand_safe(kernel_bin_name), "r"); MEM_FREE(full_path); if (file == NULL || force_build) { char build_opts[10000]; char *encoded_salt; char *kernel_filename = "$JOHN/kernels/DES_bs_kernel_h.cl"; encoded_salt = enc_salt(salt_val); opencl_read_source(kernel_filename, &kernel_source); if (get_platform_vendor_id(get_platform_id(id_gpu)) != DEV_AMD) sprintf(build_opts, "-D WORK_GROUP_SIZE="Zu" %s", lws, encoded_salt); else sprintf(build_opts, "-D WORK_GROUP_SIZE="Zu" -fno-bin-amdil -fno-bin-source -fbin-exe %s", lws, encoded_salt); MEM_FREE(encoded_salt); opencl_build(id_gpu, build_opts, save_binary, kernel_bin_name, program_ptr, kernel_filename, kernel_source); if (options.verbosity > VERB_DEFAULT) fprintf(stderr, "Salt compiled from Source:%d\n", salt_val); } else { size_t program_size; fclose(file); program_size = opencl_read_source(kernel_bin_name, &kernel_source); opencl_build_from_binary(id_gpu, program_ptr, kernel_source, program_size); if (options.verbosity > VERB_DEFAULT) fprintf(stderr, "Salt compiled from Binary:%d\n", salt_val); } MEM_FREE(kernel_source); } static void init_kernel(WORD salt_val, int id_gpu, int save_binary, int force_build, size_t lws) { cl_program program; cl_int err_code; if (marked_salts[salt_val] == salt_val) return; modify_build_save_restore(salt_val, id_gpu, save_binary, force_build, lws, &program); kernels[id_gpu][salt_val] = clCreateKernel(program, "DES_bs_25", &err_code); HANDLE_CLERROR(err_code, "Create Kernel DES_bs_25 failed.\n"); #if _OPENMP #pragma omp critical #endif { HANDLE_CLERROR(clSetKernelArg(kernels[id_gpu][salt_val], 0, sizeof(cl_mem), &buffer_map), "Failed setting kernel argument buffer_map, kernel DES_bs_25.\n"); } marked_salts[salt_val] = salt_val; HANDLE_CLERROR(clReleaseProgram(program), "Error releasing Program"); } static void set_kernel_args_kpc() { int i; for (i = 0; i < 4096; i++) { if (marked_salts[i] == i) { HANDLE_CLERROR(clSetKernelArg(kernels[gpu_id][i], 1, sizeof(cl_mem), &buffer_bs_keys), "Failed 
setting kernel argument buffer_bs_keys, kernel DES_bs_25.\n"); HANDLE_CLERROR(clSetKernelArg(kernels[gpu_id][i], 2, sizeof(cl_mem), &buffer_unchecked_hashes), "Failed setting kernel argument buffer_unchecked_hashes, kernel DES_bs_25.\n"); } } set_common_kernel_args_kpc(buffer_unchecked_hashes, buffer_bs_keys); } /* if returns 0x800000, means there is no restriction on lws due to local memory limitations.*/ /* if returns 0, means local memory shouldn't be allocated.*/ static size_t find_smem_lws_limit(unsigned int force_global_keys) { cl_ulong s_mem_sz = get_local_memory_size(gpu_id); size_t expected_lws_limit; cl_uint warp_size; if (force_global_keys) return 0x800000; if (!s_mem_sz) return 0; if (gpu_amd(device_info[gpu_id])) { if (clGetDeviceInfo(devices[gpu_id], CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof(cl_uint), &warp_size, 0) != CL_SUCCESS) warp_size = 64; } else if (gpu_nvidia(device_info[gpu_id])) { if (clGetDeviceInfo(devices[gpu_id], CL_DEVICE_WARP_SIZE_NV, sizeof(cl_uint), &warp_size, 0) != CL_SUCCESS) warp_size = 32; } else warp_size = 1; expected_lws_limit = s_mem_sz / (sizeof(DES_bs_vector) * 56); if (!expected_lws_limit) return 0; expected_lws_limit = GET_MULTIPLE_OR_ZERO( expected_lws_limit, warp_size); if (warp_size == 1 && expected_lws_limit & (expected_lws_limit - 1)) { get_power_of_two(expected_lws_limit); expected_lws_limit >>= 1; } return expected_lws_limit; } #define calc_ms(start, end) \ ((long double)(end.tv_sec - start.tv_sec) * 1000.000 + \ (long double)(end.tv_usec - start.tv_usec) / 1000.000) /* Sets global_work_size and max_keys_per_crypt. */ static void gws_tune(size_t gws_init, long double kernel_run_ms, int gws_tune_flag, void (*set_key)(char *, int), WORD test_salt, int mask_mode) { unsigned int i; char key[PLAINTEXT_LENGTH + 1] = "alterit"; struct timeval startc, endc; long double time_ms = 0; int pcount; unsigned int des_log_depth = mask_mode ? 
0 : DES_LOG_DEPTH; size_t iter_count = (mask_int_cand.num_int_cand + DES_BS_DEPTH - 1) >> DES_LOG_DEPTH; size_t gws_limit = get_max_mem_alloc_size(gpu_id) / (sizeof(opencl_DES_bs_transfer) * iter_count) ; if (gws_limit > PADDING) gws_limit -= PADDING; if (gws_limit & (gws_limit - 1)) { get_power_of_two(gws_limit); gws_limit >>= 1; } assert(gws_limit > PADDING); assert(!(gws_limit & (gws_limit - 1))); if (gws_tune_flag) global_work_size = gws_init; if (global_work_size > gws_limit) global_work_size = gws_limit; if (gws_tune_flag) { release_clobj_kpc(); create_clobj_kpc(global_work_size); set_kernel_args_kpc(); for (i = 0; i < (global_work_size << des_log_depth); i++) { key[i & 3] = i & 255; key[(i & 3) + 3] = i ^ 0x3E; set_key(key, i); } set_salt(&test_salt); gettimeofday(&startc, NULL); pcount = (int)(global_work_size << des_log_depth); des_crypt_25((int *)&pcount, NULL); gettimeofday(&endc, NULL); time_ms = calc_ms(startc, endc); global_work_size = (size_t)((kernel_run_ms / time_ms) * (long double)global_work_size); } if (global_work_size < local_work_size) global_work_size = local_work_size; get_power_of_two(global_work_size); if (global_work_size > gws_limit) global_work_size = gws_limit; release_clobj_kpc(); create_clobj_kpc(global_work_size); set_kernel_args_kpc(); /* for hash_ids[2*x + 1], 27 bits for storing gid and 5 bits for bs depth. 
*/ assert(global_work_size <= ((1U << 28) - 1)); fmt_opencl_DES.params.max_keys_per_crypt = global_work_size << des_log_depth; fmt_opencl_DES.params.min_keys_per_crypt = 1U << des_log_depth; } static void release_kernels() { int i; for (i = 0; i < 4096; i++) if(marked_salts[i] == i) { HANDLE_CLERROR(clReleaseKernel(kernels[gpu_id][i]), "Release kernel(crypt(i)) failed.\n"); kernels[gpu_id][i] = 0; marked_salts[i] = 0x7fffffff; } } static void auto_tune_all(long double kernel_run_ms, void (*set_key)(char *, int), WORD test_salt, int mask_mode, size_t extern_lws_limit, unsigned int *forced_global_keys) { unsigned int force_global_keys = 1; unsigned int gws_tune_flag = 1; unsigned int lws_tune_flag = 1; size_t s_mem_limited_lws; struct timeval startc, endc; long double time_ms = 0; char key[PLAINTEXT_LENGTH + 1] = "alterit"; unsigned int des_log_depth = mask_mode ? 0 : DES_LOG_DEPTH; if (cpu(device_info[gpu_id])) { if (get_platform_vendor_id(platform_id) == DEV_AMD) force_global_keys = 0; else force_global_keys = 1; kernel_run_ms = 5; } else if (amd_vliw4(device_info[gpu_id]) || amd_vliw5(device_info[gpu_id]) || gpu_intel(device_info[gpu_id])) { force_global_keys = 0; } else if (gpu_nvidia(device_info[gpu_id])) { force_global_keys = 1; } else if (gpu(device_info[gpu_id])) { force_global_keys = 0; } else { force_global_keys = 1; kernel_run_ms = 40; } local_work_size = 0; global_work_size = 0; gws_tune_flag = 1; lws_tune_flag = 1; opencl_get_user_preferences(FORMAT_LABEL); if (global_work_size) gws_tune_flag = 0; if (local_work_size || restore_lws_config(CONFIG_FILE, gpu_id, &local_work_size, extern_lws_limit, forced_global_keys)) { lws_tune_flag = 0; if (local_work_size & (local_work_size - 1)) { get_power_of_two(local_work_size); } } s_mem_limited_lws = find_smem_lws_limit(force_global_keys); #if 0 fprintf(stdout, "Limit_smem:"Zu", Force global keys:%u," s_mem_limited_lws, force_global_keys); #endif if (s_mem_limited_lws == 0x800000 || !s_mem_limited_lws) { long 
double best_time_ms; size_t best_lws, lws_limit; *forced_global_keys = 1; release_kernels(); init_kernel(test_salt, gpu_id, 0, 1, 0); gws_tune(1024, 2 * kernel_run_ms, gws_tune_flag, set_key, test_salt, mask_mode); gws_tune(global_work_size, kernel_run_ms, gws_tune_flag, set_key, test_salt, mask_mode); lws_limit = get_kernel_max_lws(gpu_id, kernels[gpu_id][test_salt]); if (lws_limit > global_work_size) lws_limit = global_work_size; if (lws_limit > extern_lws_limit) lws_limit = extern_lws_limit; if (lws_tune_flag) { if (gpu(device_info[gpu_id]) && lws_limit >= 32) local_work_size = 32; else local_work_size = get_kernel_preferred_multiple(gpu_id, kernels[gpu_id][test_salt]); } if (local_work_size > lws_limit) local_work_size = lws_limit; assert(local_work_size <= lws_limit); if (lws_tune_flag) { time_ms = 0; best_time_ms = 999999.00; best_lws = local_work_size; while (local_work_size <= lws_limit && local_work_size <= PADDING) { int pcount, i; for (i = 0; i < (global_work_size << des_log_depth); i++) { key[i & 3] = i & 255; key[(i & 3) + 3] = i ^ 0x3F; set_key(key, i); } set_salt(&test_salt); gettimeofday(&startc, NULL); pcount = (int)(global_work_size << des_log_depth); des_crypt_25((int *)&pcount, NULL); gettimeofday(&endc, NULL); time_ms = calc_ms(startc, endc); if (time_ms < best_time_ms) { best_lws = local_work_size; best_time_ms = time_ms; } #if 0 fprintf(stdout, "GWS: "Zu", LWS: "Zu", Limit_smem:"Zu", Limit_kernel:"Zu"," "Current time:%Lf, Best time:%Lf\n", global_work_size, local_work_size, s_mem_limited_lws, get_kernel_max_lws(gpu_id, kernels[gpu_id][test_salt]), time_ms, best_time_ms); #endif local_work_size *= 2; } local_work_size = best_lws; gws_tune(global_work_size, kernel_run_ms, gws_tune_flag, set_key, test_salt, mask_mode); } } else { long double best_time_ms; size_t best_lws; cl_uint warp_size; if (gpu_amd(device_info[gpu_id])) { if (clGetDeviceInfo(devices[gpu_id], CL_DEVICE_WAVEFRONT_WIDTH_AMD, sizeof(cl_uint), &warp_size, 0) != CL_SUCCESS) 
warp_size = 64; } else if (gpu_nvidia(device_info[gpu_id])) { if (clGetDeviceInfo(devices[gpu_id], CL_DEVICE_WARP_SIZE_NV, sizeof(cl_uint), &warp_size, 0) != CL_SUCCESS) warp_size = 32; } else { warp_size = 1; if (!(cpu(device_info[gpu_id]) || gpu_intel(device_info[gpu_id]))) fprintf(stderr, "Possible auto_tune fail!!.\n"); } if (lws_tune_flag) local_work_size = warp_size; if (s_mem_limited_lws > extern_lws_limit) s_mem_limited_lws = extern_lws_limit; if (local_work_size > s_mem_limited_lws) local_work_size = s_mem_limited_lws; release_kernels(); init_kernel(test_salt, gpu_id, 0, 1, local_work_size); if (local_work_size > get_kernel_max_lws(gpu_id, kernels[gpu_id][test_salt])) { local_work_size = get_kernel_max_lws(gpu_id, kernels[gpu_id][test_salt]); release_kernels(); init_kernel(test_salt, gpu_id, 0, 1, local_work_size); } gws_tune(1024, 2 * kernel_run_ms, gws_tune_flag, set_key, test_salt, mask_mode); gws_tune(global_work_size, kernel_run_ms, gws_tune_flag, set_key, test_salt, mask_mode); if (global_work_size < s_mem_limited_lws) { s_mem_limited_lws = global_work_size; if (local_work_size > s_mem_limited_lws) local_work_size = s_mem_limited_lws; } if (lws_tune_flag) { best_time_ms = 999999.00; best_lws = local_work_size; while (local_work_size <= s_mem_limited_lws && local_work_size <= PADDING) { int pcount, i; release_kernels(); init_kernel(test_salt, gpu_id, 0, 1, local_work_size); set_kernel_args_kpc(); for (i = 0; i < (global_work_size << des_log_depth); i++) { key[i & 3] = i & 255; key[(i & 3) + 3] = i ^ 0x3E; set_key(key, i); } set_salt(&test_salt); gettimeofday(&startc, NULL); pcount = (int)(global_work_size << des_log_depth); des_crypt_25((int *)&pcount, NULL); gettimeofday(&endc, NULL); time_ms = calc_ms(startc, endc); if (time_ms < best_time_ms && local_work_size <= get_kernel_max_lws( gpu_id, kernels[gpu_id][test_salt])) { best_lws = local_work_size; best_time_ms = time_ms; } #if 0 fprintf(stdout, "GWS: "Zu", LWS: "Zu", Limit_smem:"Zu", 
Limit_kernel:"Zu"," "Current time:%Lf, Best time:%Lf\n", global_work_size, local_work_size, s_mem_limited_lws, get_kernel_max_lws(gpu_id, kernels[gpu_id][test_salt]), time_ms, best_time_ms); #endif if (gpu_amd(device_info[gpu_id]) || gpu_nvidia(device_info[gpu_id])) { if (local_work_size < 16) local_work_size = 16; else if (local_work_size < 32) local_work_size = 32; else if (local_work_size < 64) local_work_size = 64; else if (local_work_size < 96) local_work_size = 96; else if (local_work_size < 128) local_work_size = 128; else local_work_size += warp_size; } else local_work_size *= 2; } local_work_size = best_lws; release_kernels(); init_kernel(test_salt, gpu_id, 0, 1, local_work_size); set_kernel_args_kpc(); gws_tune(global_work_size, kernel_run_ms, gws_tune_flag, set_key, test_salt, mask_mode); } } release_kernels(); if (lws_tune_flag) save_lws_config(CONFIG_FILE, gpu_id, local_work_size, *forced_global_keys); if (options.verbosity > VERB_DEFAULT) fprintf(stdout, "GWS: "Zu", LWS: "Zu"\n", global_work_size, local_work_size); } static void reset(struct db_main *db) { static int initialized; int i; size_t extern_lws_limit, limit_temp; unsigned int forced_global_keys = 0; if (initialized) { struct db_salt *salt; WORD salt_list[4096]; unsigned int num_salts, i; release_clobj_kpc(); release_clobj(); if (options.flags & FLG_MASK_CHK && mask_int_cand.num_int_cand > 1) mask_mode = 1; create_clobj(db); if (!mask_mode) create_clobj_kpc(global_work_size); extern_lws_limit = create_checking_kernel_set_args(); limit_temp = create_keys_kernel_set_args(mask_mode); if (limit_temp < extern_lws_limit) extern_lws_limit = limit_temp; if (mask_mode) { unsigned int max_uncracked_hashes = 0; WORD test_salt = 0; salt = db -> salts; max_uncracked_hashes = 0; do { if (salt -> count > max_uncracked_hashes) { max_uncracked_hashes = salt -> count; test_salt = *(WORD *)salt -> salt; } } while ((salt = salt -> next)); forced_global_keys = 0; auto_tune_all(300, fmt_opencl_DES.methods.set_key, 
test_salt, mask_mode, extern_lws_limit, &forced_global_keys); } salt = db -> salts; num_salts = 0; do { salt_list[num_salts++] = (*(WORD *)salt -> salt); } while ((salt = salt -> next)); #if _OPENMP && PARALLEL_BUILD #pragma omp parallel for #endif for (i = 0; i < num_salts; i++) init_kernel(salt_list[i], gpu_id, 1, 0, forced_global_keys ? 0 :local_work_size); set_kernel_args_kpc(); } else { char *ciphertext; WORD salt_val; create_clobj(NULL); extern_lws_limit = create_checking_kernel_set_args(); limit_temp = create_keys_kernel_set_args(0); if (limit_temp < extern_lws_limit) extern_lws_limit = limit_temp; for (i = 0; i < 4096; i++) build_salt((WORD)i); salt_val = *(WORD *)fmt_opencl_DES.methods.salt(fmt_opencl_DES.methods.split( fmt_opencl_DES.params.tests[0].ciphertext, 0, &fmt_opencl_DES)); auto_tune_all(300, fmt_opencl_DES.methods.set_key, salt_val, 0, extern_lws_limit, &forced_global_keys); i = 0; while (fmt_opencl_DES.params.tests[i].ciphertext) { ciphertext = fmt_opencl_DES.methods.split(fmt_opencl_DES.params.tests[i].ciphertext, 0, &fmt_opencl_DES); salt_val = *(WORD *)fmt_opencl_DES.methods.salt(ciphertext); init_kernel(salt_val, gpu_id, 1, 0, forced_global_keys ? 0 :local_work_size); i++; } set_kernel_args_kpc(); initialized++; } } static int des_crypt_25(int *pcount, struct db_salt *salt) { const int count = mask_mode ? *pcount : (*pcount + DES_BS_DEPTH - 1) >> DES_LOG_DEPTH; size_t *lws = local_work_size ? &local_work_size : NULL; size_t current_gws = local_work_size ? (count + local_work_size - 1) / local_work_size * local_work_size : count; size_t iter_count = (mask_int_cand.num_int_cand + DES_BS_DEPTH - 1) >> DES_LOG_DEPTH; process_keys(current_gws, lws); if (salt && num_uncracked_hashes(current_salt) != salt -> count && /* In case there are duplicate hashes, num_uncracked_hashes is always less than salt->count, as * num_uncracked_hashes tracks only unique hashes. 
*/ num_uncracked_hashes(current_salt) > salt -> count) update_buffer(salt); current_gws *= iter_count; ret_code = clEnqueueNDRangeKernel(queue[gpu_id], kernels[gpu_id][current_salt], 1, NULL, &current_gws, lws, 0, NULL, NULL); HANDLE_CLERROR(ret_code, "Enque kernel DES_bs_25 failed.\n"); *pcount = mask_mode ? *pcount * mask_int_cand.num_int_cand : *pcount; return extract_info(current_gws, lws, current_salt); } void opencl_DES_bs_h_register_functions(struct fmt_main *fmt) { fmt -> methods.done = &clean_all_buffers; fmt -> methods.reset = &reset; fmt -> methods.set_salt = &set_salt; fmt -> methods.crypt_all = &des_crypt_25; opencl_DES_bs_init_global_variables = &init_global_variables; } #endif /* HAVE_OPENCL */
/* ---- sectionsModificado.c ---- */
#include <stdio.h>
#include <omp.h>

/* funcA: reports which OpenMP thread executed this section. */
void funcA() {
    printf("En funcA: esta sección la ejecuta el thread %d\n",
           omp_get_thread_num());
}

/* funcB: reports which OpenMP thread executed this section. */
void funcB() {
    printf("En funcB: esta sección la ejecuta el thread %d\n",
           omp_get_thread_num());
}

/*
 * Demo of the OpenMP `sections` worksharing construct: funcA and funcB are
 * independent sections, each executed exactly once by whichever thread of
 * the parallel team picks it up (possibly the same thread for both).
 *
 * Fixes vs. original: `main()` relied on implicit int (removed in C99);
 * declared as `int main(void)` and returns 0. The redundant `(void)` casts
 * of the void-returning calls were dropped.
 */
int main(void) {
#pragma omp parallel sections
    {
#pragma omp section
        funcA();
#pragma omp section
        funcB();
    }
    return 0;
}
/* ---- allocate.c ---- */
/** * * @file allocate.c * * PLASMA auxiliary routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @version 2.6.0 * @author Jakub Kurzak * @date 2010-11-15 * **/ #include <stdlib.h> #include "common.h" /***************************************************************************//** * **/ void *plasma_shared_alloc(plasma_context_t *plasma, size_t size, int type) { void *memptr; size *= plasma_element_size(type); if (size <= 0) return NULL; //if (posix_memalign(&memptr, STANDARD_PAGE_SIZE, size) != 0) { if ((memptr = malloc(size)) == NULL) { plasma_error("plasma_shared_alloc", "posix_memalign() failed"); return NULL; } if ( plasma->runtime == PLASMA_OMPSS) { #pragma omp register([size]memptr) // printf("shared_alloc::memptr: %p[%d]\n", memptr, size); } return memptr; } /***************************************************************************//** * **/ void plasma_shared_free(plasma_context_t *plasma, void *ptr) { if (ptr == NULL) // somewhat redundant - free() does the same return; if ( plasma->runtime != PLASMA_OMPSS) { free(ptr); } } /***************************************************************************//** * **/ void *plasma_private_alloc(plasma_context_t *plasma, size_t size, int type) { void *memptr; size *= plasma_element_size(type); if (size <= 0) return NULL; //if (posix_memalign(&memptr, CACHE_LINE_SIZE, size) != 0) { if ((memptr = malloc(size)) == NULL) { plasma_error("plasma_private_alloc", "posix_memalign() failed"); return NULL; } if ( plasma->runtime == PLASMA_OMPSS) { #pragma omp register([size]memptr) // printf("private_alloc::memptr: %p[%d]\n", memptr, size); } return memptr; } /***************************************************************************//** * **/ void plasma_private_free(plasma_context_t *plasma, void *ptr) { if (ptr == NULL) // somewhat redundant - free() does the same return; if ( plasma->runtime != PLASMA_OMPSS) { free(ptr); } }
/* ---- constitute.c ---- */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Consitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to % create a 640x480 image from unsigned red-green-blue character data, use: % % image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception); % % The format of the ConstituteImage method is: % % Image *ConstituteImage(const size_t columns,const size_t rows, % const char *map,const StorageType storage,const void *pixels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: width in pixels of the image. % % o rows: height in pixels of the image. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose % from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConstituteImage(const size_t columns,const size_t rows, const char *map,const StorageType storage,const void *pixels, ExceptionInfo *exception) { Image *image; MagickBooleanType status; register ssize_t i; size_t length; /* Allocate image structure. 
*/ assert(map != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map); assert(pixels != (void *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage((ImageInfo *) NULL,exception); if (image == (Image *) NULL) return((Image *) NULL); length=strlen(map); for (i=0; i < (ssize_t) length; i++) { switch (map[i]) { case 'a': case 'A': case 'O': case 'o': { image->alpha_trait=BlendPixelTrait; break; } case 'C': case 'c': case 'm': case 'M': case 'Y': case 'y': case 'K': case 'k': { image->colorspace=CMYKColorspace; break; } case 'I': case 'i': { image->colorspace=GRAYColorspace; break; } default: { if (length == 1) image->colorspace=GRAYColorspace; break; } } } status=SetImageExtent(image,columns,rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImage() returns all the properties of an image or image sequence % except for the pixels. It is much faster and consumes far less memory % than ReadImage(). On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the PingImage method is: % % Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Ping the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t PingStream(const Image *magick_unused(image), const void *magick_unused(pixels),const size_t columns) { magick_unreferenced(image); magick_unreferenced(pixels); return(columns); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport Image *PingImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; ImageInfo *ping_info; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); ping_info=CloneImageInfo(image_info); ping_info->ping=MagickTrue; image=ReadStream(ping_info,&PingStream,exception); if (image != (Image *) NULL) { ResetTimer(&image->timer); if (ping_info->verbose != MagickFalse) (void) IdentifyImage(image,stdout,MagickFalse,exception); } ping_info=DestroyImageInfo(ping_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImages() pings one or more images and returns them as an image list. % % The format of the PingImage method is: % % Image *PingImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PingImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char ping_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Ping image list from a file. 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename, (int) image_info->scene,ping_filename,exception); if (LocaleCompare(ping_filename,image_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. */ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImage() reads an image or image sequence from a file or file handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. 
% % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. 
*/ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. */ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. 
*/ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. */ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); 
read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickPathExtent]; const char *option; const StringInfo *profile; ssize_t option_type; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if (*magick_path == '\0' && *next->magick == '\0') (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) 
CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; 
option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, MagickPathExtent,timestamp); (void) 
SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. % % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /*
    Expand any printf-style escape (e.g. "image-%d.png") at the current scene
    number; if the expansion differs from the raw filename, the name contains
    a frame template.
  */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);  /* parse [1-5] scene subrange; errors deliberately discarded */
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /*
            Read each frame by substituting the scene number into the
            template; unreadable frames are skipped rather than fatal.
          */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /*
    Plain filename: read it as a single (possibly multi-frame) image.
  */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d I n l i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  register const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  /* NOTE(review): ThrowReaderException is a macro that returns from this
     function; it relies on the in-scope image/exception locals -- confirm
     against its definition before restructuring. */
  p++;
  length=0;
  blob=Base64Decode(p,&length);  /* allocates; caller of Base64Decode owns blob */
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file on disk, the name is defined by the filename member
%  of the image structure.
  WriteImage() returns MagickFalse if there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* save the caller's filename; image->filename is clobbered below and
     restored on the delegate/fallback paths */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* probe host byte order: inspect the first byte of a 1 value */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder; write to a
                temporary file instead and copy it to the stream afterwards
                (see the `temporary' block at the end of this function).
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);  /* serialize non-thread-safe coders */
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder and no delegate for the requested format: fall back to
            the image's own format, then the filename extension, before
            giving up with MissingDelegateError.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImages() writes an image sequence into one or more files.  While
%  WriteImage() can write an image sequence, it is limited to writing
%  the sequence into a single file using a format which supports multiple
%  frames.
  WriteImages(), however, does not have this limitation, instead it
%  generates multiple output files if necessary (or when requested).  When
%  ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
%  to include a printf-style formatting string for the frame number (e.g.
%  "image%02d.png").
%
%  The format of the WriteImages method is:
%
%      MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o images: the image list.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /* an explicit filename overrides each frame's stored filename */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers: any non-increasing pair makes
          the whole list renumbered sequentially from the first scene.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* suppress the per-frame monitor; overall progress is reported below */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    if (write_info->adjoin != MagickFalse)
      break;  /* WriteImage() already wrote the whole sequence into one file */
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
GB_binop__isle_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): regenerate from the GraphBLAS code generator instead of
// hand-editing; the function bodies are #include'd templates that bind to the
// local names and macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_uint32)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__isle_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_uint32)
// A*D function (colscale):         GB (_AxD__isle_uint32)
// D*A function (rowscale):         GB (_DxB__isle_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_uint32)
// C=scalar+B                       GB (_bind1st__isle_uint32)
// C=scalar+B'                      GB (_bind1st_tran__isle_uint32)
// C=A+scalar                       GB (_bind2nd__isle_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__isle_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (ISLE: result is 1 when x <= y, else 0, stored as uint32_t)
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISLE is none of these, so this kernel is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable second return, kept by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isle_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint32_t aij = Ax [pA] ;            \
    Cx [pC] = (x <= aij) ;              \
}

GrB_Info GB (_bind1st_tran__isle_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansions
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint32_t aij = Ax [pA] ;            \
    Cx [pC] = (aij <= y) ;              \
}

GrB_Info GB (_bind2nd_tran__isle_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
detector.c
#include "darknet.h"
#include <stdbool.h>
#include "image.h"

// COCO category ids, indexed by contiguous class index (COCO ids have gaps).
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};

//enum chi_province {wan = 35, shan, jin1, jin2, hu, yu1, meng, xin, zang, qing, lu, yu2, su, zhe, ning, gui1, hei, ji1, liao, jin3, ji3, ming, gan, xiang, ee, yue, qiong, gan2, gui2, yun, chuan};
// Pinyin labels for Chinese province plate prefixes; index = class index - 35
// (matches the CHINESE_PROVINCE enum starting at wan == 35).
const char *str_province[] = {"wan", "shan", "jin1", "jin2", "hu", "yu1", "meng", "xin", "zang", "qing", "lu", "yu2", "su", "zhe", "ning", "gui1", "hei", "ji1", "liao", "jin3", "ji3", "ming", "gan", "xiang", "ee", "yue", "qiong", "gan2", "gui2", "yun", "chuan"};

// Train a detection network described by cfgfile on the dataset listed in
// datacfg, optionally resuming from weightfile, across ngpus GPUs.
// Writes periodic .backup and .weights checkpoints into the backup directory.
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    // fix: allocate an array of network pointers (sizeof(network *)), not an
    // array of network structs — the original over-allocated ngpus*sizeof(network).
    network **nets = calloc(ngpus, sizeof(network *));

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        // same seed per replica so all GPUs start from identical weights
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];

    int imgs = net->batch * net->subdivisions * ngpus;//huigb 64,ngpus==1
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;//huigb 64
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;

    pthread_t load_thread = load_data(args);
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        // multi-scale training: every 10th batch pick a new input size
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            // discard the batch that was loaded at the old size
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        /*
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           if(!b.x) break;
           printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
           }
         */
        /*
           int zz;
           for(zz = 0; zz < train.X.cols; ++zz){
           image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[zz] + k*5, 1);
           printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
           draw_bbox(im, b, 1, 1,0,0);
           }
           show_image(im, "truth11");
           cvWaitKey(0);
           save_image(im, "truth11");
           }
         */

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        // exponential moving average of the loss for smoother reporting
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}

// Extract a COCO image id from a path like ".../COCO_val2014_000000123.jpg":
// the numeric suffix after the last '_' (or last '/' when no '_' exists).
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    return atoi(p+1);
}

// Append detections for one image to fp in COCO results JSON format
// ({"image_id", "category_id", "bbox":[x,y,w,h], "score"}), clamping boxes
// to the image bounds w x h.
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int i, j;
    int image_id = get_coco_image_id(image_path);
    for(i = 0; i < num_boxes; ++i){
        // convert center/size box to corner coordinates, then clamp
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}

// Write VOC-style detection lines "<id> <score> <xmin> <ymin> <xmax> <ymax>"
// to one per-class file in fps, using 1-based pixel coordinates.
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.
+ 1;

        // clamp to 1-based image bounds
        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax);
        }
    }
}

// Write ImageNet-style detection lines "<image id> <class 1-based> <score>
// <xmin> <ymin> <xmax> <ymax>" to fp, clamping boxes to the image bounds.
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            int class = j;
            if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax);
        }
    }
}

// Run validation over the "valid" image list, feeding each image plus its
// horizontal flip through the network as a batch of 2, and write results in
// the format selected by the data cfg "eval" key (coco / imagenet / voc).
// Image loading is pipelined across nthreads worker threads.
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    // batch of 2: original image + flipped image
    set_batch_network(net, 2);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // VOC: one results file per class
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    // double-depth input image: [original | flipped]
    image input = make_image(net->w, net->h, net->c*2);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    // prime the pipeline with the first nthreads images
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // collect the previous wave of loads
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // kick off the next wave
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            // pack original then flipped copy into the 2x-depth input
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets,
num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // rewind over the trailing ",\n" so the JSON array closes cleanly
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

// Run validation over the "valid" image list (single forward pass per image,
// no flip augmentation) and write results in the format selected by the
// data cfg "eval" key (coco / imagenet / voc). Image loading is pipelined
// across nthreads worker threads.
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // VOC: one results file per class
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    // prime the pipeline with the first nthreads images
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // collect the previous wave of loads
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // kick off the next wave
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // rewind over the trailing ",\n" so the JSON array closes cleanly
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

// Measure proposal counts, mean best-IOU, and recall of the network against
// the ground-truth label files for the hard-coded COCO 5k validation list.
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths("data/coco_val_5k.list"); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int j, k; int m = plist->size; int i=0; float thresh = .001; float iou_thresh = .5; float nms = .4; int total = 0; int correct = 0; int proposals = 0; float avg_iou = 0; for(i = 0; i < m; ++i){ char *path = paths[i]; image orig = load_image_color(path, 0, 0); image sized = resize_image(orig, net->w, net->h); char *id = basecfg(path); network_predict(net, sized.data); int nboxes = 0; detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes); if (nms) do_nms_obj(dets, nboxes, 1, nms); char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); for(k = 0; k < nboxes; ++k){ if(dets[k].objectness > thresh){ ++proposals; } } for (j = 0; j < num_labels; ++j) { ++total; box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h}; float best_iou = 0; for(k = 0; k < l.w*l.h*l.n; ++k){ float iou = box_iou(dets[k].bbox, t); if(dets[k].objectness > thresh && iou > best_iou){ best_iou = iou; } } avg_iou += best_iou; if(best_iou > iou_thresh){ ++correct; } } fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total); free(id); free_image(orig); free_image(sized); } } void get_province(int pro_num, char* chname, char *province_name) { CHINESE_PROVINCE pro_enum;// = pro_num; bool province_find = false; for(pro_enum = wan; pro_enum <= chuan; pro_enum++){ if (strcmp(chname, 
str_province[pro_enum-35]) == 0){ province_find = true; break; } } if (province_find){ switch(pro_enum){ case wan: strcpy(province_name, "皖"); break; case shan: strcpy(province_name, "陕"); break; case jin1: strcpy(province_name, "京"); break; case jin2: strcpy(province_name, "津"); break; case hu: strcpy(province_name, "沪"); break; case yu1: strcpy(province_name, "渝"); break; case meng: strcpy(province_name, "蒙"); break; case xin: strcpy(province_name, "新"); break; case zang: strcpy(province_name, "藏"); break; case qing: strcpy(province_name, "青"); break; case lu: strcpy(province_name, "鲁"); break; case yu2: strcpy(province_name, "豫"); break; case su: strcpy(province_name, "苏"); break; case zhe: strcpy(province_name, "浙"); break; case ning: strcpy(province_name, "宁"); break; case gui1: strcpy(province_name, "桂"); break; case hei: strcpy(province_name, "黑"); break; case ji1: strcpy(province_name, "吉"); break; case liao: strcpy(province_name, "辽"); break; case jin3: strcpy(province_name, "晋"); break; case ji3: strcpy(province_name, "冀"); break; case ming: strcpy(province_name, "闽"); break; case gan: strcpy(province_name, "赣"); break; case xiang: strcpy(province_name, "湘"); break; case ee: strcpy(province_name, "鄂"); break; case yue: strcpy(province_name, "粤"); break; case qiong: strcpy(province_name, "琼"); break; case gan2: strcpy(province_name, "甘"); break; case gui2: strcpy(province_name, "贵"); break; case yun: strcpy(province_name, "云"); break; case chuan: strcpy(province_name, "川"); break; default: strcpy(province_name, "无"); } }else printf("unknown province name!\n"); } //huiguobao definition get_chinese_char func: void get_chinese_lp(detection *dets, int num, float thresh, char **names, int classes, char* get_name) { int i,j; for(i = 0; i < num; ++i){ //char labelstr[20] = {0}; int class = -1; for(j = 0; j < classes; ++j){ if (dets[i].prob[j] > thresh){ if (class < 0) { if (j >= 35){ char namestr[3] = {0}; get_province(j, names[j], namestr); strcat(get_name, 
namestr); }else strcat(get_name, names[j]); class = j; } else { strcat(get_name, ", "); if (j >= 35){ char namestr[3] = {0}; get_province(j, names[j], namestr); strcat(get_name, namestr); }else strcat(get_name, names[j]); } printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100); } } } } void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) { //list *options = read_data_cfg(datacfg); //char *name_list = option_find_str(options, "names", "data/names.list"); //char **names = get_labels(name_list); //metadata meta = get_metadata("./lp_net/lpdetect/lp.data"); network *net = load_network("./lp_net/lpdetect/yolov3-lp.cfg", "./lp_net/lpdetect/yolov3-lp_final.weights", 0); metadata meta_ocr = get_metadata("./lp_net/lpscr/lpscr.data"); network *net_ocr = load_network("./lp_net/lpscr/lpscr-net.cfg", "./lp_net/lpscr/lpscr-net_final.weights", 0); //image **alphabet = load_alphabet();//input data/labels //set_batch_network(net, 1); //set_batch_network(net_ocr, 1); //srand(2222222); //double time; char buff[256]; char *input = buff; float nms=.45; //char ch_name[20]; while(1){ if(filename){ strncpy(input, filename, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if(!input) return; strtok(input, "\n"); } image im = load_image_color(input,0,0); //image sized = letterbox_image(im, net->w, net->h); //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n-1]; layer l_ocr = net_ocr->layers[net_ocr->n-1]; //float *X = sized.data; //time=what_time_is_it_now(); //network_predict(net, X); //printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time); network_predict_image(net,im); int nboxes = 0; detection *dets = 
get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        int i;
        //huiguobao crop image:
        for(i = 0; i < nboxes; ++i){
            // NOTE(review): 20 bytes may be tight for a full plate string
            // (UTF-8 province char is 3 bytes, plus ", " separators) — verify
            char ch_name[20] = {0};
            //char save_name[20] = {0};
            int sign = -1;
            int j;
            //image im_crop = get_crop_detect(im, dets, i, thresh, meta.names, l.classes);
            // sign >= 0 iff any class probability exceeds thresh
            for(j = 0; j < l.classes; ++j){
                if (dets[i].prob[j] > thresh) sign = j;
            }
            if (sign >= 0){
                // crop the plate region and run the OCR network on it
                image im_crop = get_crop_detect(im, dets[i].bbox);
                if (im_crop.w == 0) break;
                network_predict_image(net_ocr, im_crop);
                int nboxes_ocr = 0;
                detection *dets_ocr = get_network_boxes(net_ocr, im_crop.w, im_crop.h, thresh, hier_thresh, 0, 1, &nboxes_ocr);
                do_nms_sort(dets_ocr, nboxes_ocr, l_ocr.classes, nms);
                do_dets_sort(dets_ocr, nboxes_ocr);
                get_chinese_lp(dets_ocr, nboxes_ocr, thresh, meta_ocr.names, l_ocr.classes, ch_name);
                if (strlen(ch_name)){
                    //sprintf(ch_name, "%s%04d", outfile, i);//"%s%04d":reserve four 0
                    //save_image(im_crop, ch_name);
                    printf("chinese LP name:%s\n", ch_name);
                }
                //draw_detections(im_crop, dets_ocr, nboxes_ocr, thresh, meta_ocr.names, alphabet, l_ocr.classes);
                free_detections(dets_ocr, nboxes_ocr);
                /*
                if (outfile){
                    sprintf(save_name, "%s%02d", outfile, i);//"%s%04d":reserve four 0
                    save_image(im_crop, save_name);
                }else{
#ifdef OPENCV
                    make_window("predictions_ocr", 512, 512, 0);
                    show_image(im_crop, "predictions_ocr", 0);
#endif
                }
                */
                free_image(im_crop);
            }
            draw_bbox(im, dets[i].bbox, 5, 1,0,0);
        }
        //draw_detections(im, dets, nboxes, thresh, meta.names, alphabet, l.classes);
        free_detections(dets, nboxes);
        /*
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            make_window("predictions", 512, 512, 0);
            show_image(im, "predictions", 0);
#endif
        }
        */
        free_image(im);
        //free_image(sized);
        if (filename) break;
    }
}

// Video/webcam demo of the license-plate detector + OCR pipeline: grab frames
// from a video file or camera, detect plates, OCR each crop, print the plate
// string, and display the annotated frame until ESC (27) is pressed or the
// video file ends.
void test_demo(int cam_index, char *filename, char *prefix, int w, int h, int frames, int fullscreen)
{
    //list *options = read_data_cfg(datacfg);
    //char *name_list = option_find_str(options, "names", "data/names.list");
    //char **names = get_labels(name_list);
    //metadata meta = get_metadata("./lp_net/lpdetect/lp.data");
    network *net = load_network("./lp_net/lpdetect/yolov3-lp.cfg", "./lp_net/lpdetect/yolov3-lp_final.weights", 0);
    metadata meta_ocr = get_metadata("./lp_net/lpscr/lpscr.data");
    network *net_ocr = load_network("./lp_net/lpscr/lpscr-net.cfg", "./lp_net/lpscr/lpscr-net_final.weights", 0);
    //image **alphabet = load_alphabet();//input data/labels
    //set_batch_network(net, 1);
    //set_batch_network(net_ocr, 1);
    //srand(2222222);
    double time;
    //char buff[256];
    //char *input = buff;
    float nms=.45;
    void * cap;
    float thresh = 0.5;
    float hier_thresh = 0.5;
    //char ch_name[20];
    if(filename){
        //printf("video file: %s\n", filename);
        cap = open_video_stream(filename, 0, 0, 0, 0);
    }else{
        cap = open_video_stream(0, cam_index, w, h, frames);
    }
    if(!cap) error("Couldn't connect to webcam.\n");
    if(!prefix){
        make_window("Demo", 1352, 1013, fullscreen);
    }
    while(1){
        image im = get_image_from_stream(cap);
        //image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];
        layer l_ocr = net_ocr->layers[net_ocr->n-1];
        //float *X = sized.data;
        time=what_time_is_it_now();
        //network_predict(net, X);
        network_predict_image(net,im);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        int i;
        //huiguobao crop image:
        for(i = 0; i < nboxes; ++i){
            char ch_name[20] = {0};
            //char save_name[20] = {0};
            int sign = -1;
            int j;
            //image im_crop = get_crop_detect(im, dets, i, thresh, meta.names, l.classes);
            for(j = 0; j < l.classes; ++j){
                if (dets[i].prob[j] > thresh) sign = j;
            }
            if (sign >= 0){
                image im_crop = get_crop_detect(im, dets[i].bbox);
                if (im_crop.w == 0) break;
                network_predict_image(net_ocr, im_crop);
                int nboxes_ocr = 0;
                detection *dets_ocr = get_network_boxes(net_ocr, im_crop.w, im_crop.h, thresh, hier_thresh, 0, 1, &nboxes_ocr);
                do_nms_sort(dets_ocr, nboxes_ocr, l_ocr.classes, nms);
                do_dets_sort(dets_ocr, nboxes_ocr);
                get_chinese_lp(dets_ocr, nboxes_ocr, thresh, meta_ocr.names, l_ocr.classes, ch_name);
                if (strlen(ch_name)){
                    //sprintf(ch_name, "%s%04d", outfile, i);//"%s%04d":reserve four 0
                    //save_image(im_crop, ch_name);
                    printf("chinese LP name:%s\n", ch_name);
                }
                //draw_detections(im_crop, dets_ocr, nboxes_ocr, thresh, meta_ocr.names, alphabet, l_ocr.classes);
                free_detections(dets_ocr, nboxes_ocr);
                /*
                if (outfile){
                    sprintf(save_name, "%s%02d", outfile, i);//"%s%04d":reserve four 0
                    save_image(im_crop, save_name);
                }else{
#ifdef OPENCV
                    make_window("predictions_ocr", 512, 512, 0);
                    show_image(im_crop, "predictions_ocr", 0);
#endif
                }
                */
                free_image(im_crop);
            }
            draw_bbox(im, dets[i].bbox, 5, 1,0,0);
        }
        //draw_detections(im, dets, nboxes, thresh, meta.names, alphabet, l.classes);
        free_detections(dets, nboxes);
        /*
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            make_window("predictions", 512, 512, 0);
            show_image(im, "predictions", 0);
#endif
        }
        */
        int c = show_image(im, "Demo", 1);
        if (c != -1) c = c%256;
        free_image(im);
        //free_image(sized);
        printf("one thread take in %f seconds.\n", what_time_is_it_now()-time);
        if (filename || c == 27) break;
    }
}

/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
    char *base = basecfg(cfgfile);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);

    srand(2222222);
    CvCapture * cap;

    int w = 1280;
    int h
= 720;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    if(w){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
    }
    if(h){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
    }

    if(!cap) error("Couldn't connect to webcam.\n");
    cvNamedWindow(base, CV_WINDOW_NORMAL);
    cvResizeWindow(base, 512, 512);
    float fps = 0;
    int i;
    float nms = .45;

    while(1){
        image in = get_image_from_stream(cap);
        //image in_s = resize_image(in, net->w, net->h);
        image in_s = letterbox_image(in, net->w, net->h);
        layer l = net->layers[net->n-1];

        float *X = in_s.data;
        network_predict(net, X);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);

        for(i = 0; i < nboxes; ++i){
            if(dets[i].prob[class] > thresh){
                box b = dets[i].bbox;
                int left  = b.x-b.w/2.;
                int top   = b.y-b.h/2.;
                censor_image(in, left, top, b.w, b.h);
            }
        }
        show_image(in, base);
        cvWaitKey(10);
        free_detections(dets, nboxes);
        free_image(in_s);
        free_image(in);

        float curr = 0;
        fps = .9*fps + .1*curr;
        for(i = 0; i < skip; ++i){
            image in = get_image_from_stream(cap);
            free_image(in);
        }
    }
#endif
}

void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
    char *base = basecfg(cfgfile);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);

    srand(2222222);
    CvCapture * cap;

    int w = 1280;
    int h = 720;

    if(filename){
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);
    }

    if(w){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
    }
    if(h){
        cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
    }

    if(!cap) error("Couldn't connect to webcam.\n");
    cvNamedWindow(base, CV_WINDOW_NORMAL);
    cvResizeWindow(base, 512, 512);
    float fps = 0;
    int i;
    int count = 0;
    float nms = .45;

    while(1){
        image in = get_image_from_stream(cap);
        //image in_s = resize_image(in, net->w, net->h);
        image in_s = letterbox_image(in, net->w, net->h);
        layer l = net->layers[net->n-1];
        show_image(in, base);

        int nboxes = 0;
        float *X = in_s.data;
        network_predict(net, X);
        detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);

        for(i = 0; i < nboxes; ++i){
            if(dets[i].prob[class] > thresh){
                box b = dets[i].bbox;
                int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
                int dx = b.x*in.w-size/2.;
                int dy = b.y*in.h-size/2.;
                image bim = crop_image(in, dx, dy, size, size);
                char buff[2048];
                sprintf(buff, "results/extract/%07d", count);
                ++count;
                save_image(bim, buff);
                free_image(bim);
            }
        }
        free_detections(dets, nboxes);
        free_image(in_s);
        free_image(in);

        float curr = 0;
        fps = .9*fps + .1*curr;
        for(i = 0; i < skip; ++i){
            image in = get_image_from_stream(cap);
            free_image(in);
        }
    }
#endif
}
*/

/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
    network_predict_image(net, im);
    layer l = net->layers[net->n-1];
    int nboxes = num_boxes(net);
    fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
    if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/

// Command-line entry for the detector subcommands: parses -prefix, -c, -avg,
// -gpus, -fullscreen, -w/-h/-fps and -filename flags, then hands off to
// demo() (the only path still wired up in this build).
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    //if(argc < 4){
    //    fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
    //    return;
    //}
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        // count the comma-separated GPU ids
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        gpu = gpu_index;//#huigb 0
        gpus = &gpu;
        ngpus = 1;
    }

    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);
    char *filename = find_char_arg(argc, argv, "-filename", 0);

    demo(cam_index, filename, prefix, avg, width, height, fps, fullscreen); // huigb rewrite demo, in demo.c, multi threads
}
/* ==== 5.race1.c ==== */
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 0; j < N; j++) // Array Out of Bound Access A[i][j] = A[i][j - 1]; } // CHECK: Data Race detected // END
/* ==== GB_binop__lt_int8.c ==== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated specialization of the LT (less-than) operator
// for int8_t inputs producing bool.  Only comments added below; any real
// change must be made in the Generator/ sources instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_01__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lt_int8)
// A*D function (colscale):         GB (_AxD__lt_int8)
// D*A function (rowscale):         GB (_DxB__lt_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__lt_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__lt_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lt_int8)
// C=scalar+B                       GB (_bind1st__lt_int8)
// C=scalar+B'                      GB (_bind1st_tran__lt_int8)
// C=A+scalar                       GB (_bind2nd__lt_int8)
// C=A'+scalar                      GB (_bind2nd_tran__lt_int8)

// C type:   bool
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // LT has no accumulator form; the template is compiled out.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // LT has no accumulator form; the template is compiled out.
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x < aij) ;               \
}

GrB_Info GB (_bind1st_tran__lt_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij < y) ;               \
}

GrB_Info GB (_bind2nd_tran__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_ctrssq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrssq.c, normal z -> c, Fri Sep 28 17:38:24 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
// This computation also shows up in plasma_core_csyssq() and can be factored out.
// LAPACK does real and imag components separately in classq.
//
// Folds |value| into the running scaled sum of squares (scale, sumsq),
// maintaining the invariant  contribution == scale^2 * sumsq  without
// overflowing: when a new maximum magnitude arrives, the accumulated sum is
// rescaled to the new scale instead of squaring a large number directly.
static inline void ssq(plasma_complex32_t value, float *scale, float *sumsq)
{
    float absa = cabsf(value);
    if (absa != 0.0) { // != propagates nan
        if (*scale < absa) {
            // new maximum: rescale the old sum to the new scale
            *sumsq = 1.0 + *sumsq*((*scale/absa)*(*scale/absa));
            *scale = absa;
        }
        else {
            *sumsq = *sumsq + ((absa/(*scale))*(absa/(*scale)));
        }
    }
}

/******************************************************************************/
// Accumulates the scaled sum of squares of the triangular (or trapezoidal,
// when m != n) part of the m-by-n tile A into (*scale, *sumsq), so that on
// return  scale^2 * sumsq  includes sum |A(i,j)|^2 over the selected
// triangle.  uplo picks the upper or lower triangle; diag == PlasmaUnit
// treats diagonal entries as 1 without reading them.  A is column-major
// with leading dimension lda.
__attribute__((weak))
void plasma_core_ctrssq(plasma_enum_t uplo, plasma_enum_t diag,
                int m, int n,
                const plasma_complex32_t *A, int lda,
                float *scale, float *sumsq)
{
    if (uplo == PlasmaUpper) {
        if (diag == PlasmaNonUnit) {
            for (int j = 0; j < n; j++) {
                // column j: rows 0 .. min(j, m-1) inclusive
                ssq(A[lda*j], scale, sumsq);
                for (int i = 1; i < imin(j+1, m); i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
        else { // PlasmaUnit
            int j;
            // columns intersecting the diagonal: implicit unit diagonal
            for (j = 0; j < imin(n, m); j++) {
                ssq(1.0, scale, sumsq);
                for (int i = 0; i < j; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
            // remaining columns (n > m): full height, no diagonal entry
            for (; j < n; j++) {
                ssq(A[lda*j], scale, sumsq);
                for (int i = 1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
    }
    else { // PlasmaLower
        if (diag == PlasmaNonUnit) {
            for (int j = 0; j < imin(n, m); j++) {
                // column j: diagonal entry plus everything below it
                ssq(A[lda*j+j], scale, sumsq);
                for (int i = j+1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
        else { // PlasmaUnit
            for (int j = 0; j < imin(n, m); j++) {
                ssq(1.0, scale, sumsq);
                for (int i = j+1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
    }
}
/******************************************************************************/
// Asynchronous wrapper: spawns an OpenMP task that resets the scaled
// sum-of-squares accumulators and folds the triangular tile A into them via
// plasma_core_ctrssq().  The work is skipped when the sequence already
// carries an error; `request` is accepted for the uniform core_omp
// interface.
void plasma_core_omp_ctrssq(plasma_enum_t uplo, plasma_enum_t diag,
                     int m, int n,
                     const plasma_complex32_t *A, int lda,
                     float *scale, float *sumsq,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        // Only run if no earlier task in this sequence has failed.
        if (sequence->status == PlasmaSuccess) {
            // Accumulate into locals, then publish the pair satisfying
            // scale^2 * sumsq == sum of squares of the triangle of A.
            float sc = 0.0;
            float sq = 1.0;
            plasma_core_ctrssq(uplo, diag, m, n, A, lda, &sc, &sq);
            *scale = sc;
            *sumsq = sq;
        }
    }
}
threadprivate2.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Per-thread counter: with OpenMP, the threadprivate directive gives every
 * thread its own copy, so the parallel loops below update thread-local
 * counters without a data race.  Without OpenMP there is one plain global. */
int counter=0;
#ifdef _OPENMP
#pragma omp threadprivate(counter)
#endif

/*
 * Demo of the OpenMP `threadprivate` directive.
 *
 * Two parallel loops distribute 10000 increments of 1 and then 10000
 * increments of 3 over the team, each thread bumping only its private
 * counter.  The final parallel region prints one line per thread showing
 * that thread's private total.  Compiled without OpenMP, the pragmas are
 * ignored and the single counter reaches 40000.
 *
 * Fixes over the original: the loop index is declared in each for-init
 * (C99) instead of a single function-scope `int i` reused by both loops,
 * and main returns 0 explicitly rather than relying on C99's implicit
 * return from main.
 */
int main(void)
{
    /* An index declared in the for-init of a `parallel for` is implicitly
     * private, exactly like the original file-scope index was. */
    #pragma omp parallel for
    for (int i = 0; i < 10000; i++)
        counter++;

    #pragma omp parallel for
    for (int i = 0; i < 10000; i++)
        counter += 3;

    /* Each thread prints its own private counter. */
    #pragma omp parallel
    printf("counter=%d\n", counter);

    return 0;
}
nashville_parallel.c
#ifdef __linux__
#define _GNU_SOURCE
#endif

#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>

#include <MagickWand.h>
#include <omp.h>

#include "nashville.h"

/* One processed horizontal strip of the image, tagged with its original
 * vertical position so strips can be re-ordered after parallel work. */
typedef struct piece_ {
    MagickWand *piece;
    int index;
} piece_t;

/* qsort comparator: ascending by strip index. */
int compare(const void *a, const void *b) {
    const piece_t *left = (const piece_t *)a;
    const piece_t *right = (const piece_t *)b;
    return left->index - right->index;
}

/* Sequentially append all strips (already sorted by index) into a single
 * vertically-stacked image.  Caller owns the returned wand. */
MagickWand *aggregate_seq(piece_t *pieces, int count) {
    MagickWand *results = NewMagickWand();
    MagickResetIterator(results);
    for (int i = 0; i < count; i++) {
        MagickSetLastIterator(results);
        MagickAddImage(results, pieces[i].piece);
    }
    MagickResetIterator(results);
    /* stack == 1 (MagickTrue): append top-to-bottom */
    MagickWand *final = MagickAppendImages(results, 1);
    DestroyMagickWand(results);
    return final;
}

/* Parallel append: each thread stacks a contiguous run of strips into a
 * partial image, then the partials are stacked sequentially.  Caller owns
 * the returned wand.
 * NOTE(review): the results of malloc() here are unchecked. */
MagickWand *aggregate_par(piece_t *pieces, int count, int threads) {
    // Holds aggregation state.
    MagickWand **results = (MagickWand **)malloc(sizeof(MagickWand *) * threads);
    for (int i = 0; i < threads; i++) {
        results[i] = NewMagickWand();
        MagickResetIterator(results[i]);
    }

    /* Integer division: the last thread absorbs the remainder below. */
    int values_per_thread = count / threads;
    printf("values per piece: %d\n", values_per_thread);
    #pragma omp parallel for
    for (int i = 0; i < threads; i++) {
        int start = i * values_per_thread;
        int end = (i + 1) * values_per_thread;
        if (i == threads - 1) {
            end = count;
        }
        MagickWand *result = results[i];
        // printf("thread %d: %d->%d\n", omp_get_thread_num(), start, end);
        for (int j = start; j < end; j++) {
            MagickSetLastIterator(result);
            MagickAddImage(result, pieces[j].piece);
        }
        MagickResetIterator(result);
        /* Collapse this thread's run into one image and replace the slot. */
        MagickWand *final = MagickAppendImages(result, 1);
        result = DestroyMagickWand(result);
        results[i] = final;
    }

    /* Stack the per-thread partials in order. */
    MagickWand *final_iterator = NewMagickWand();
    MagickResetIterator(final_iterator);
    for (int i = 0; i < threads; i++) {
        MagickSetLastIterator(final_iterator);
        MagickAddImage(final_iterator, results[i]);
    }
    MagickResetIterator(final_iterator);
    MagickWand *final = MagickAppendImages(final_iterator, 1);
    for (int i = 0; i < threads; i++) {
        DestroyMagickWand(results[i]);
    }
    free(results);
    return final;
}

/* Applies the colortone effect to the input image by splitting it into
 * horizontal strips, processing them in parallel, then re-assembling them.
 * Caller owns the returned wand; input_wand is only read via
 * MagickGetImageRegion.
 * NOTE(review): several suspects flagged inline below — confirm each. */
MagickWand *colortone_parallel(MagickWand *input_wand, const char *color,
                               const char *compose_opt, int negate, int threads) {
    size_t width = MagickGetImageWidth(input_wand);
    size_t height = MagickGetImageHeight(input_wand);
    printf("Image is (%ld x %ld) pixels\n", width, height);

    // We want each chunk to be close to the L2 cache size.
    const int l2_cache_size_bytes = 262144 * 3;
    // Number of rows to process per batch.
    size_t region_height = l2_cache_size_bytes / width;
    if (region_height == 0) {
        region_height = 1;
    }
    /* NOTE(review): this hardcoded value overrides the cache-size-based
     * computation above — presumably a leftover experiment; confirm. */
    region_height = 199; // TODO this might shave off a few things.

    /* NOTE(review): integer division — when height is not a multiple of
     * region_height, up to region_height-1 bottom rows of the image are
     * never processed or appended.  Verify against expected output size. */
    int num_regions = height / region_height;
    printf("Regions: %d\n", num_regions);

    struct timeval start, end, diff;
    gettimeofday(&start, NULL);
    /* NOTE(review): malloc result unchecked. */
    piece_t *pieces = malloc(num_regions * sizeof(piece_t));
    #pragma omp parallel for
    for (int i = 0; i < num_regions; i++) {
        /*
        printf("%d Looking at region (%ld -> %ld, %ld -> %ld)\n", i, 0l,
               0l + width, region_height * i, region_height * i + region_height);
        */
        MagickWand *wand = MagickGetImageRegion(input_wand, width, region_height,
                                                0, region_height * i);
        MagickWand *colorized_wand = CloneMagickWand(wand);
        MagickWand *colorspace_wand = CloneMagickWand(wand);
        /* NOTE(review): do_colortone is invoked twice with identical
         * arguments — confirm the double application is intentional and
         * not a copy/paste duplicate. */
        do_colortone(wand, color, compose_opt, negate, colorized_wand,
                     colorspace_wand);
        do_colortone(wand, color, compose_opt, negate, colorized_wand,
                     colorspace_wand);
        MagickModulateImage(wand, HUE, SATURATION, VALUE);
        MagickGammaImage(wand, GAMMA);
        colorized_wand = DestroyMagickWand(colorized_wand);
        colorspace_wand = DestroyMagickWand(colorspace_wand);
        pieces[i].index = i;
        pieces[i].piece = wand;
    }
    gettimeofday(&end, NULL);
    timersub(&end, &start, &diff);
    double runtime = (double)diff.tv_sec + ((double)diff.tv_usec / 1000000.0);
    printf("Processing runtime: %.3f seconds\n", runtime);
    fflush(stdout);

    gettimeofday(&start, NULL);
    // Sort pieces by their index.
    /* (With a static parallel-for schedule the pieces are already written
     * at index i, so this sort is effectively a no-op safeguard.) */
    qsort(pieces, num_regions, sizeof(piece_t), compare);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &diff);
    runtime = (double)diff.tv_sec + ((double)diff.tv_usec / 1000000.0);
    printf("Sort runtime: %.3f seconds\n", runtime);
    fflush(stdout);

    gettimeofday(&start, NULL);
    MagickWand *final;
    /* Only parallelize the append when each thread gets a decent batch. */
    if (num_regions / threads > 16) {
        printf("parallel aggregation\n");
        final = aggregate_par(pieces, num_regions, threads);
    } else {
        printf("sequential aggregation\n");
        final = aggregate_seq(pieces, num_regions);
    }
    free(pieces);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &diff);
    runtime = (double)diff.tv_sec + ((double)diff.tv_usec / 1000000.0);
    printf("Total aggregation runtime: %.3f seconds\n", runtime);
    fflush(stdout);
    return final;
}
convolution_1x1_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch) { // interleave // src = inch-outch // dst = 4b-4a-inch/4a-outch/4b kernel_tm_pack4.create(2 * 1, inch / 4, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16); int q = 0; for (; q + 7 < outch; q += 8) { const float* k0 = (const float*)kernel + (q + 0) * inch; const float* k1 = (const float*)kernel + (q + 1) * inch; const float* k2 = (const float*)kernel + (q + 2) * inch; const float* k3 = (const float*)kernel + (q + 3) * inch; const float* k4 = (const float*)kernel + (q + 4) * inch; const float* k5 = (const float*)kernel + (q + 5) * inch; const float* k6 = (const float*)kernel + (q + 6) * inch; const float* k7 = (const float*)kernel + (q + 7) * inch; __fp16* g0 = kernel_tm_pack4.channel(q / 8); for (int p = 0; p + 3 < inch; p += 4) { g0[0] = (__fp16)k0[0]; g0[1] = (__fp16)k1[0]; g0[2] = (__fp16)k2[0]; g0[3] = (__fp16)k3[0]; g0[4] = (__fp16)k4[0]; g0[5] = (__fp16)k5[0]; g0[6] = (__fp16)k6[0]; g0[7] = (__fp16)k7[0]; g0[8] = (__fp16)k0[1]; g0[9] = (__fp16)k1[1]; g0[10] = (__fp16)k2[1]; g0[11] = (__fp16)k3[1]; g0[12] = (__fp16)k4[1]; g0[13] = (__fp16)k5[1]; g0[14] = (__fp16)k6[1]; g0[15] = (__fp16)k7[1]; g0[16] = (__fp16)k0[2]; g0[17] = 
(__fp16)k1[2]; g0[18] = (__fp16)k2[2]; g0[19] = (__fp16)k3[2]; g0[20] = (__fp16)k4[2]; g0[21] = (__fp16)k5[2]; g0[22] = (__fp16)k6[2]; g0[23] = (__fp16)k7[2]; g0[24] = (__fp16)k0[3]; g0[25] = (__fp16)k1[3]; g0[26] = (__fp16)k2[3]; g0[27] = (__fp16)k3[3]; g0[28] = (__fp16)k4[3]; g0[29] = (__fp16)k5[3]; g0[30] = (__fp16)k6[3]; g0[31] = (__fp16)k7[3]; k0 += 4; k1 += 4; k2 += 4; k3 += 4; k4 += 4; k5 += 4; k6 += 4; k7 += 4; g0 += 32; } } for (; q + 3 < outch; q += 4) { const float* k0 = (const float*)kernel + (q + 0) * inch; const float* k1 = (const float*)kernel + (q + 1) * inch; const float* k2 = (const float*)kernel + (q + 2) * inch; const float* k3 = (const float*)kernel + (q + 3) * inch; __fp16* g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); for (int p = 0; p + 3 < inch; p += 4) { g0[0] = (__fp16)k0[0]; g0[1] = (__fp16)k1[0]; g0[2] = (__fp16)k2[0]; g0[3] = (__fp16)k3[0]; g0[4] = (__fp16)k0[1]; g0[5] = (__fp16)k1[1]; g0[6] = (__fp16)k2[1]; g0[7] = (__fp16)k3[1]; g0[8] = (__fp16)k0[2]; g0[9] = (__fp16)k1[2]; g0[10] = (__fp16)k2[2]; g0[11] = (__fp16)k3[2]; g0[12] = (__fp16)k0[3]; g0[13] = (__fp16)k1[3]; g0[14] = (__fp16)k2[3]; g0[15] = (__fp16)k3[3]; k0 += 4; k1 += 4; k2 += 4; k3 += 4; g0 += 16; } } } static void conv1x1s1_sgemm_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const __fp16* bias = _bias; // interleave Mat tmp; if (size >= 8) tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); { int nn_size; int remain_size_start = 0; nn_size = (size - 
remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 4; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { // transpose 4x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { // transpose 4x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const __fp16* img0 = bottom_blob.channel(0); img0 += i * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += bottom_blob.cstep * 4; } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* outptr0 = top_blob.channel(p); __fp16* outptr1 = 
top_blob.channel(p + 1); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? bias + p * 4 : zeros; float16x8_t _bias0 = vld1q_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "mov v28.16b, %10.16b \n" "mov v29.16b, %10.16b \n" "mov v30.16b, %10.16b \n" "mov v31.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v28.8h, v4.8h, v0.h[4] \n" "fmla v29.8h, v4.8h, v0.h[5] \n" "fmla v30.8h, v4.8h, v0.h[6] \n" "fmla v31.8h, v4.8h, v0.h[7] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v28.8h, v5.8h, v1.h[4] \n" "fmla v29.8h, v5.8h, v1.h[5] \n" "fmla v30.8h, v5.8h, v1.h[6] \n" "fmla v31.8h, v5.8h, v1.h[7] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "fmla v28.8h, v6.8h, v2.h[4] \n" "fmla v29.8h, v6.8h, v2.h[5] \n" "fmla v30.8h, v6.8h, v2.h[6] \n" "fmla v31.8h, v6.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "fmla v28.8h, v7.8h, v3.h[4] \n" "fmla v29.8h, v7.8h, v3.h[5] \n" "fmla v30.8h, v7.8h, v3.h[6] \n" "fmla v31.8h, v7.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext 
v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(pp); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 
"=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(pp); float16x8_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(tmpptr); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); _sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3); kptr += 32; tmpptr += 4; } vst1_f16(outptr0, vget_low_f16(_sum0)); vst1_f16(outptr1, vget_high_f16(_sum0)); outptr0 += 4; outptr1 += 4; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 4 : zeros; float16x4_t _bias0 = vld1_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "mov v28.16b, %8.16b \n" "mov v29.16b, %8.16b \n" "mov v30.16b, %8.16b \n" "mov v31.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v28.4h, v4.4h, v0.h[4] \n" "fmla v29.4h, v4.4h, v0.h[5] \n" "fmla v30.4h, v4.4h, v0.h[6] \n" "fmla v31.4h, v4.4h, v0.h[7] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v28.4h, v5.4h, v1.h[4] \n" "fmla v29.4h, v5.4h, v1.h[5] \n" "fmla v30.4h, v5.4h, v1.h[6] \n" "fmla v31.4h, v5.4h, v1.h[7] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "fmla v28.4h, v6.4h, v2.h[4] \n" "fmla v29.4h, v6.4h, v2.h[5] \n" "fmla v30.4h, v6.4h, v2.h[6] \n" "fmla v31.4h, v6.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "fmla v28.4h, v7.4h, v3.h[4] \n" "fmla v29.4h, v7.4h, v3.h[5] \n" "fmla v30.4h, v7.4h, v3.h[6] \n" "fmla v31.4h, v7.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", 
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); float16x4_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(tmpptr); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); _sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3); 
                kptr += 16;
                tmpptr += 4;
            }

            // store the finished 4-lane fp16 accumulator for this output pixel
            vst1_f16(outptr0, _sum0);

            outptr0 += 4;
        }
    }

    //     // NOTE sgemm
    //     for (; p<outch; p++)
    //     {
    //         Mat out0 = top_blob.channel(p);
    //
    //         const short bias0 = bias ? bias[p] : 0.f;
    //
    //         __fp16* outptr0 = out0;
    //
    //         for (int i=0; i<size; i++)
    //         {
    //             short sum = bias0;
    //
    //             const __fp16* kptr = _kernel.channel(p);
    //
    //             for (int q=0; q<inch; q++)
    //             {
    //                 const __fp16* img0 = bottom_blob.channel(q);
    //
    //                 sum += img0[i] * kptr[0];
    //                 kptr ++;
    //             }
    //
    //             outptr0[i] = sum;
    //         }
    //     }
}

// 1x1 stride-2 convolution for pack4 fp16 storage.
// Strategy: gather every second input pixel (stride 2 in both x and y) into a
// compacted temporary blob, then reuse the stride-1 sgemm kernel above.
static void conv1x1s2_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After reading outw pixels with stride 2 we are 2*outw pixels into the
    // row; skip the remainder of this row plus the entire next (odd) row.
    // The *4 accounts for elempack == 4 fp16 lanes per pixel.
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Copy 4 output pixels per iteration: inputs are 8 fp16 apart
            // (2-pixel stride * pack4), outputs are contiguous.
            for (; j + 3 < outw; j += 4)
            {
                float16x4_t _v0 = vld1_f16(r0);
                float16x4_t _v1 = vld1_f16(r0 + 8);
                float16x4_t _v2 = vld1_f16(r0 + 16);
                float16x4_t _v3 = vld1_f16(r0 + 24);
                float16x8_t _v01 = vcombine_f16(_v0, _v1);
                float16x8_t _v23 = vcombine_f16(_v2, _v3);
                vst1q_f16(outptr, _v01);
                vst1q_f16(outptr + 8, _v23);

                r0 += 32;
                outptr += 16;
            }
            // 2-pixel tail
            for (; j + 1 < outw; j += 2)
            {
                float16x4_t _v0 = vld1_f16(r0);
                float16x4_t _v1 = vld1_f16(r0 + 8);
                float16x8_t _v = vcombine_f16(_v0, _v1);
                vst1q_f16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }
            // 1-pixel tail
            for (; j < outw; j++)
            {
                float16x4_t _v = vld1_f16(r0);
                vst1_f16(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    // The compacted blob now has stride 1; delegate to the sgemm path.
    conv1x1s1_sgemm_pack4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
/* ==== invertc.c ==== */
/* cc -lm t4.c -qsmp */
/*
 * OpenMP "parallel sections" demonstration: four independent matrices are
 * each inverted twice (inverse of the inverse == original), concurrently,
 * one matrix per section.  Each section records its own start/end wall time
 * and the residual error of the double inversion.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#define FLT double
/* utility routines */
FLT system_clock(FLT *x);                          /* wall-clock seconds */
FLT **matrix(int nrl,int nrh,int ncl,int nch);     /* NR-style offset 2-D array */
/* work routines */
void mset(FLT **m, int n, int in);                 /* fill: diag=in, off-diag=1 */
FLT mcheck(FLT **m, int n, int in);                /* sum |m - expected| */
void over(FLT ** mat,int size);                    /* in-place matrix inversion */
int main(int argc,char *argv[]) {
    FLT **m1,**m2,**m3,**m4;
    FLT t0_start;
    FLT t1_start,t1_end,e1;
    FLT t2_start,t2_end,e2;
    FLT t3_start,t3_end,e3;
    FLT t4_start,t4_end,e4;
    int n;
    n=750;
    /* four independent n x n matrices, 1-based indexing */
    m1=matrix(1,n,1,n);
    m2=matrix(1,n,1,n);
    m3=matrix(1,n,1,n);
    m4=matrix(1,n,1,n);
    /* distinct diagonal values so each section's result is distinguishable */
    mset(m1,n,10);
    mset(m2,n,20);
    mset(m3,n,30);
    mset(m4,n,40);
    system_clock(&t0_start);     /* common reference time for all sections */
#pragma omp parallel sections
    {
#pragma omp section
        {
            system_clock(&t1_start);
            over(m1,n);
            over(m1,n);          /* invert twice: should recover the input */
            system_clock(&t1_end);
            e1=mcheck(m1,n,10);  /* accumulated round-trip error */
            t1_start=t1_start-t0_start;
            t1_end=t1_end-t0_start;
        }
#pragma omp section
        {
            system_clock(&t2_start);
            over(m2,n);
            over(m2,n);
            system_clock(&t2_end);
            e2=mcheck(m2,n,20);
            t2_start=t2_start-t0_start;
            t2_end=t2_end-t0_start;
        }
#pragma omp section
        {
            system_clock(&t3_start);
            over(m3,n);
            over(m3,n);
            system_clock(&t3_end);
            e3=mcheck(m3,n,30);
            t3_start=t3_start-t0_start;
            t3_end=t3_end-t0_start;
        }
#pragma omp section
        {
            system_clock(&t4_start);
            over(m4,n);
            over(m4,n);
            system_clock(&t4_end);
            e4=mcheck(m4,n,40);
            t4_start=t4_start-t0_start;
            t4_end=t4_end-t0_start;
        }
    }
    printf("section 1 start time= %10.5g end time= %10.5g error= %g\n",t1_start,t1_end,e1);
    printf("section 2 start time= %10.5g end time= %10.5g error= %g\n",t2_start,t2_end,e2);
    printf("section 3 start time= %10.5g end time= %10.5g error= %g\n",t3_start,t3_end,e3);
    printf("section 4 start time= %10.5g end time= %10.5g error= %g\n",t4_start,t4_end,e4);
    return 0;
}
/* Fill m (1..n x 1..n): value `in` on the diagonal, 1 elsewhere. */
void mset(FLT **m, int n, int in) {
    int i,j;
    for(i=1;i<=n;i++)
        for(j=1;j<=n;j++) {
            if(i == j) {
                m[i][j]=in;
            }
            else {
                m[i][j]=1;
            }
        }
}
/* Sum of absolute deviations of m from the pattern mset(m,n,in) produced. */
FLT mcheck(FLT **m, int n, int in) {
    int i,j;
    FLT x;
    x=0.0;
    for(i=1;i<=n;i++)
        for(j=1;j<=n;j++) {
            if(i == j) {
                x=x+fabs(m[i][j]-in);
            }
            else {
                x=x+fabs(m[i][j]-1);
            }
        }
    return x;
}
/* In-place Gauss-Jordan inversion with partial (column) pivoting.
 * Row swaps performed during elimination are recorded in sw[] and undone
 * as COLUMN swaps, in reverse order, after the elimination pass.
 * NOTE(review): sw has a fixed capacity of 1999 usable entries (indices
 * 1..size), and the swap indices are stored in FLT slots and implicitly
 * converted back to int -- confirm size < 2000 at all call sites. */
void over(FLT ** mat,int size) {
    int k, jj, kp1, i, j, l, krow, irow;
    FLT pivot, temp;
    FLT sw[2000][2];
    for (k = 1 ;k<= size ; k++) {
        /* find the pivot row: largest |mat[i][k]| for i >= k */
        jj = k;
        if (k != size) {
            kp1 = k + 1;
            pivot = fabs(mat[k][k]);
            for( i = kp1;i<= size ;i++) {
                temp = fabs(mat[i][k]);
                if (pivot < temp) {
                    pivot = temp;
                    jj = i;
                }
            }
        }
        /* remember the swap so it can be undone after elimination */
        sw[k][0] =k;
        sw[k][1] = jj;
        if (jj != k)
            for (j = 1 ;j<= size; j++) {
                temp = mat[jj][j];
                mat[jj][j] = mat[k][ j];
                mat[k][j] = temp;
            }
        /* scale pivot row; pivot element is replaced by its reciprocal */
        for (j = 1 ;j<= size; j++)
            if (j != k)
                mat[k][j] = mat[k][j] / mat[k][k];
        mat[k][k] = 1.0 / mat[k][k];
        /* eliminate column k from every other row */
        for (i = 1; i<=size; i++)
            if (i != k)
                for (j = 1;j<=size; j++)
                    if (j != k)
                        mat[i][j] = mat[i][j] - mat[k][j] * mat[i][k];
        for (i = 1;i<=size;i++)
            if (i != k)
                mat[i][k] = -mat[i][k] * mat[k][k];
    }
    /* undo recorded row swaps as column swaps, in reverse order */
    for (l = 1; l<=size; ++l) {
        k = size - l + 1;
        krow = sw[k][0];
        irow = sw[k][1];
        if (krow != irow)
            for (i = 1; i<= size; ++i) {
                temp = mat[i][krow];
                mat[i][krow] = mat[i][irow];
                mat[i][irow] = temp;
            }
    }
}
/*
  The routine matrix was adapted from
  Numerical Recipes in C The Art of Scientific Computing
  Press, Flannery, Teukolsky, Vetting
  Cambridge University Press, 1988.
*/
/* Allocate a 2-D array addressable as m[nrl..nrh][ncl..nch].
 * All row storage lives in ONE contiguous block hung off the first row
 * pointer; subsequent row pointers are offsets into it.  The row-pointer
 * array and the data block are both shifted so the given lower bounds work. */
FLT **matrix(int nrl,int nrh,int ncl,int nch) {
    int i;
    FLT **m;
    m=(FLT **) malloc((unsigned) (nrh-nrl+1)*sizeof(FLT*));
    if (!m){
        printf("allocation failure 1 in matrix()\n");
        exit(1);
    }
    m -= nrl;
    for(i=nrl;i<=nrh;i++) {
        if(i == nrl){
            /* one contiguous block for every row */
            m[i]=(FLT *) malloc((unsigned) (nrh-nrl+1)*(nch-ncl+1)*sizeof(FLT));
            if (!m[i]){
                printf("allocation failure 2 in matrix()\n");
                exit(1);
            }
            m[i] -= ncl;
        }
        else {
            m[i]=m[i-1]+(nch-ncl+1);
        }
    }
    return m;
}
/* Wall-clock time in seconds via gettimeofday; also stored through x if
 * x is non-NULL. */
FLT system_clock(FLT *x) {
    FLT t;
    FLT six=1.0e-6;
    struct timeval tb;
    struct timezone tz;
    gettimeofday(&tb,&tz);
    t=(FLT)tb.tv_sec+((FLT)tb.tv_usec)*six;
    if(x){ *x=t; }
    return(t);
}
/* ==== ASTMatchers.h ==== */
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // use the id(...) matcher around the match expressions that match the nodes // you want to access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl()))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the id(...) calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. 
/// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::ast_type_traits::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// If the provided matcher matches a node, binds the node to \c ID. /// /// FIXME: Do we want to support this now that we have bind()? template <typename T> internal::Matcher<T> id(StringRef ID, const internal::BindableMatcher<T> &InnerMatcher) { return InnerMatcher.bind(ID); } /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. 
This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. 
/// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. 
/// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. 
extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches public C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPublic()) /// matches 'int a;' AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; } /// Matches protected C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isProtected()) /// matches 'int b;' AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; } /// Matches private C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPrivate()) /// matches 'int c;' AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. 
/// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). 
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. 
/// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. 
/// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. 
/// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. /// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. 
/// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. /// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. 
/// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. 
/// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. 
/// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. /// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. 
/// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. 
/// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. 
/// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. 
/// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. /// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. 
/// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. 
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. 
/// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches array subscript expressions. 
/// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. 
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. 
/// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
    integerLiteral;

/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
///   float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
    floatLiteral;

/// Matches imaginary literals, which are based on integer and floating
/// point literals, e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
    imaginaryLiteral;

/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
    userDefinedLiteral;

/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
///   int array[4] = {1};
///   vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
    compoundLiteralExpr;

/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
    cxxNullPtrLiteralExpr;

/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
    chooseExpr;

/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
    gnuNullExpr;

/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
///   void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr>
    atomicExpr;

/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
///   int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;

/// Matches binary operator expressions.
/// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). /// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. 
/// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. /// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. 
/// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. /// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. 
/// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::Matcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef, internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  // NOTE(review): the llvm::Regex is re-compiled for every candidate node;
  // if this shows up in profiles, consider caching the compiled regex.
  llvm::Regex RE(RegExp);
  return RE.match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name); } /// Matches C++ classes that are directly or indirectly derived from /// a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_MATCHER_P(CXXRecordDecl, isDerivedFrom, internal::Matcher<NamedDecl>, Base) { return Finder->classIsDerivedFrom(&Node, Base, Builder); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). 
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDerivedFrom, std::string, BaseName,
                       1) {
  assert(!BaseName.empty());
  return isDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}

/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom,
                       internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived" is expressed by composing the two simpler matchers.
  return Matcher<CXXRecordDecl>(anyOf(Base, isDerivedFrom(Base)))
      .matches(Node, Finder, Builder);
}

/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom, std::string,
                       BaseName, 1) {
  assert(!BaseName.empty());
  return isSameOrDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method in declaration order that satisfies
  // InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};  // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<ValueDecl> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either the type itself or a pointer to it, so `obj.m()` and
  // `ptr->m()` are both covered.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  // Only references found via a using-declaration have a UsingShadowDecl here.
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  // Bounds-check before advancing the iterator N positions.
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler has no exception declaration.
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. /// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. 
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // ParamIndex advances in lockstep with ArgIndex (offset by the skipped
  // implicit object argument), pairing each argument with its parameter.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
/// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. 
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}

/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Condition = Node.getCond();
  return (Condition != nullptr &&
          InnerMatcher.matches(*Condition, Finder, Builder));
}

/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Then = Node.getThen();
  return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}

/// Matches the else-statement of an if statement.
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = ast_type_traits::DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. 
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt *const DeclarationStatement =
      Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher abstracts over the differing body accessors per NodeType.
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
/// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. /// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. 
Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). /// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P(hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator), std::string, Name) { return Name == Node.getOpcodeStr(Node.getOpcode()); } /// Matches all kinds of assignment operators. 
/// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; }) /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches RecordDecl object that are spelled with "struct." /// /// Example matches S, but not C or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isStruct) { return Node.isStruct(); } /// Matches RecordDecl object that are spelled with "union." /// /// Example matches U, but not C or S. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isUnion) { return Node.isUnion(); } /// Matches RecordDecl object that are spelled with "class." /// /// Example matches C, but not S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isClass) { return Node.isClass(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? 
a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? 
/// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. 
/// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. 
/// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). 
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // NOTE(review): for the dependent/unresolved member expression kinds,
  // implicit accesses are rejected here (getBase() would not be meaningful);
  // only MemberExpr reaches the InnerMatcher for implicit `this` accesses.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
/// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. 
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. 
/// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. 
/// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. 
/// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. 
/// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. 
/// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. 
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). 
/// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. 
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) { return anyOf( gnuNullExpr(), cxxNullPtrLiteralExpr(), integerLiteral(equals(0), hasParent(expr(hasType(pointerType()))))); } /// Matches declaration of the function the statement belongs to /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does match 'return > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. 
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(intgerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor. /// /// In C++17 copy elidable constructors are no longer being /// generated in the AST as it is not permitted by the standard. 
They are /// however part of the AST in C++14 and earlier. Therefore, to write a matcher /// that works in all language modes, the matcher has to skip elidable /// constructor AST nodes if they appear in the AST. This matcher can be used to /// skip those elidable constructors. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(any( /// ignoringElidableConstructorCall(callExpr()), /// exprWithCleanups(ignoringElidableConstructorCall(callExpr()))))`` /// matches ``H D = G()`` AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(&Node)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. 
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the Stmt AST node that is marked as being the structured-block /// of an OpenMP executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// \endcode /// /// ``stmt(isOMPStructuredBlock()))`` matches ``{}``. AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. 
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == OMPC_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == OMPC_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
GB_binop__fmod_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): all kernels below are instantiated from shared templates via
// #include; the macros in this file specialize them for z = fmodf(x,y) on
// float (FP32).  Any change belongs in the Generator sources, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__fmod_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B                       GB (_bind1st__fmod_fp32)
// C=scalar+B'                      GB (_bind1st_tran__fmod_fp32)
// C=A+scalar                       GB (_bind2nd__fmod_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__fmod_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = fmodf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = fmodf (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// fmod is not in that list, so this kernel is compiled out (hence "(none)").

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as
    // emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// colscale is not generated for fmod (see the "(none)" entry above).

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// rowscale is not generated for fmod (see the "(none)" entry above).

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing in A/B
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__fmod_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__fmod_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap B
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = fmodf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__fmod_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap A
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = fmodf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = fmodf (x, aij) ;                  \
}

GrB_Info GB (_bind1st_tran__fmod_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use of the macro
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = fmodf (aij, y) ;                  \
}

GrB_Info GB (_bind2nd_tran__fmod_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rawmd5u_fmt_plug.c
/*
 * Thick raw-md5-unicode (come-back :)
 *
 * Computes md5(unicode($p)): the plaintext is converted to UTF-16LE
 * (UCS-2) before hashing.  "Thick" means this format implements its own
 * key handling rather than delegating to the dynamic format ($dynamic_29$).
 *
 * This software is Copyright (c) 2011 magnum, and it is hereby released to the
 * general public under the following terms:  Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 *
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawmd5uthick;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawmd5uthick);
#else

#include <string.h>

#include "arch.h"
#ifdef MMX_COEF
#define NBKEYS	(MMX_COEF * MD5_SSE_PARA)
#endif
#include "sse-intrinsics.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "memory.h"
#include "johnswap.h"
#include "memdbg.h"

#define FORMAT_LABEL		"Raw-MD5u"
#define FORMAT_NAME		""
#define ALGORITHM_NAME		"md5(unicode($p)) " MD5_ALGORITHM_NAME
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define CIPHERTEXT_LENGTH	32
#define BINARY_SIZE		16
#define BINARY_ALIGN		4
#define SALT_SIZE		0
#define SALT_ALIGN		1

#ifdef MMX_COEF
#define BLOCK_LOOPS		1
#define PLAINTEXT_LENGTH	27
#define MIN_KEYS_PER_CRYPT	NBKEYS
#define MAX_KEYS_PER_CRYPT	NBKEYS * BLOCK_LOOPS
/* Maps (byte offset i, key index) to the interleaved SIMD buffer layout:
   4 keys (MMX_COEF) share each 64-byte MD5 block, word-interleaved. */
#define GETPOS(i, index)	( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*16*MMX_COEF*4 )
#else
#define PLAINTEXT_LENGTH	125
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#endif

#ifdef MMX_COEF
/* Interleaved SIMD buffers, allocated in init() */
static unsigned char (*saved_key);
static unsigned char (*crypt_key);
/* Per-index shortcut into saved_key, precomputed with GETPOS(0, i) */
static unsigned int (**buf_ptr);
#else
static MD5_CTX ctx;
static int saved_key_length;
static UTF16 saved_key[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 crypt_key[BINARY_SIZE / 4];
#endif

/* Note some plaintexts will be replaced in init() if running UTF-8 */
static struct fmt_tests tests[] = {
	{"16c47151c18ac087cd12b3a70746c790", "test1"},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
	{"d41d8cd98f00b204e9800998ecf8427e", ""},
	{"9c3abef89ff76f8acd80eae37b35f64f", "test2"},
	{"849ee1b88b5d887bdb058180a666b450", "test3"},
	{"8c4cb7e8b33b56a833cdaa8673f3b425", "test4"},
	{"537e738b1ac5551f65106368dc301ece", "thatsworking"},
	{NULL}
};

static void set_key_utf8(char *_key, int index);
static void set_key_CP(char *_key, int index);

/* Selects the set_key variant for the target encoding, patches encoding-
   dependent self-test vectors, and (SIMD) allocates the key buffers. */
static void init(struct fmt_main *self)
{
#if MMX_COEF
	int i;
#endif
	if (pers_opts.target_enc == UTF_8) {
		/* This avoids an if clause for every set_key */
		self->methods.set_key = set_key_utf8;
#if MMX_COEF
		/* kick it up from 27. We will truncate in setkey_utf8() */
		self->params.plaintext_length = 3 * PLAINTEXT_LENGTH;
#endif
		tests[1].ciphertext = "94a4e171de16580742c4d141e6607bf7";
		tests[1].plaintext = "\xE2\x82\xAC"; // Euro sign
		tests[2].ciphertext = "03c60810f0e54d16e826aca385d776c8";
		tests[2].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; // 2 x euro
		tests[3].ciphertext = "2d554433d7cde7ec8d16aaf126c3be6b";
		tests[3].plaintext = "\xE2\x82\xAC\xC3\xBC"; // euro and u-umlaut
		tests[4].ciphertext = "8007d9070b27db7b30433df2cd10abc1";
		tests[4].plaintext = "\xC3\xBC\xE2\x82\xAC"; // u-umlaut and euro
	} else {
		if (pers_opts.target_enc != ASCII &&
		    pers_opts.target_enc != ISO_8859_1) {
			/* This avoids an if clause for every set_key */
			self->methods.set_key = set_key_CP;
		}
		if (CP_to_Unicode[0xfc] == 0x00fc) {
			tests[1].ciphertext = "ea7ab2b5c07650badab30790d0c9b63e";
			tests[1].plaintext = "\xFC"; // German u-umlaut in iso-8859-1
			tests[2].ciphertext = "f0a0b9f1dea0e458cec9a284ff434d44";
			tests[2].plaintext = "\xFC\xFC";
			tests[3].ciphertext = "d25a0b436b768777cc9a343d283dbf5a";
			tests[3].plaintext = "\xFC\xFC\xFC";
			tests[4].ciphertext = "719917322bf12168f8c55939e4fec8de";
			tests[4].plaintext = "\xFC\xFC\xFC\xFC";
		}
	}
#if MMX_COEF
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
	                            64*self->params.max_keys_per_crypt,
	                            MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_tiny(sizeof(*crypt_key) *
	                            BINARY_SIZE*self->params.max_keys_per_crypt,
	                            MEM_ALIGN_SIMD);
	buf_ptr = mem_calloc_tiny(sizeof(*buf_ptr) *
	                          self->params.max_keys_per_crypt,
	                          sizeof(*buf_ptr));
	for (i=0; i<self->params.max_keys_per_crypt; i++)
		buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)];
#endif
}

/* Canonicalize a hash: ensure the "$dynamic_29$" tag and lowercase hex. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[32+12+1];

	if (!strncmp(ciphertext, "$dynamic_29$", 12))
		ciphertext += 12;

	strcpy(out, "$dynamic_29$");
	memcpy(&out[12], ciphertext, 32);
	out[sizeof(out)-1] = 0;
	strlwr(&out[12]);
	return out;
}

/* Accept exactly 32 hex digits, with or without the "$dynamic_29$" tag. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (!strncmp(ciphertext, "$dynamic_29$", 12))
		ciphertext += 12;

	for (pos = ciphertext; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);

	if (!*pos && pos - ciphertext == CIPHERTEXT_LENGTH)
		return 1;
	else
		return 0;
}

/* Decode the 32 hex digits into 4 little-endian 32-bit words
   (byte-swapped on big-endian hosts). */
static void *binary(char *ciphertext)
{
	static union {
		unsigned long dummy;
		unsigned int i[BINARY_SIZE/sizeof(unsigned int)];
	} _out;
	unsigned int *out = _out.i;
	unsigned int i;
	unsigned int temp;

	ciphertext+=12;
	for (i=0; i<4; i++)
	{
		/* hex pairs are assembled in little-endian word order */
		temp  = (atoi16[ARCH_INDEX(ciphertext[i*8+0])])<<4;
		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+1])]);

		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+2])])<<12;
		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+3])])<<8;

		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+4])])<<20;
		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+5])])<<16;

		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+6])])<<28;
		temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+7])])<<24;

#if ARCH_LITTLE_ENDIAN
		out[i]=temp;
#else
		out[i]=JOHNSWAP(temp);
#endif
	}
	return out;
}

// ISO-8859-1 to UCS-2, directly into vector key buffer
static void set_key(char *_key, int index)
{
#ifdef MMX_COEF
	const unsigned char *key = (unsigned char*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	unsigned int len, temp2;

	len = 0;
	/* pack two UCS-2 chars per 32-bit word; 0x80 is the MD5 pad byte */
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		}
		else
		{
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += MMX_COEF;
	}
	*keybuf_word = 0x80;

key_cleaning:
	/* zero the rest of the previous (longer) key in this lane */
	keybuf_word += MMX_COEF;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += MMX_COEF;
	}
	/* store the MD5 length field (bits = bytes << 4 since UCS-2 doubles) */
	((unsigned int *)saved_key)[14*MMX_COEF + (index&3) +
	                            (index>>2)*16*MMX_COEF] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
	/* UTF16 store of a <256 code point is already UTF-16LE on LE hosts */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = saved_key;
	while (*s)
		*d++ = *s++;
	*d = 0;
	saved_key_length = (int)((char*)d - (char*)saved_key);
#else
	/* NOTE(review): on big-endian this writes the char into the first
	   (high) byte of each UTF16 slot and relies on the static buffer's
	   other byte being zero -- verify against upstream before touching. */
	UTF8 *s = (UTF8*)_key;
	UTF8 *d = (UTF8*)saved_key;
	while (*s) {
		*d++ = *s++;
		++d;
	}
	*d = 0;
	saved_key_length = (int)((char*)d - (char*)saved_key);
#endif
#endif
}

// Legacy codepage to UCS-2, directly into vector key buffer
static void set_key_CP(char *_key, int index)
{
#ifdef MMX_COEF
	const unsigned char *key = (unsigned char*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	unsigned int len, temp2;

	len = 0;
	/* same packing as set_key() but each byte goes through the
	   codepage-to-Unicode table first */
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			temp = CP_to_Unicode[temp];
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		} else {
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning_enc;
		}
		len += 2;
		keybuf_word += MMX_COEF;
	}
	*keybuf_word = 0x80;

key_cleaning_enc:
	keybuf_word += MMX_COEF;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += MMX_COEF;
	}
	((unsigned int *)saved_key)[14*MMX_COEF + (index&3) +
	                            (index>>2)*16*MMX_COEF] = len << 4;
#else
	saved_key_length = enc_to_utf16((UTF16*)&saved_key,
	                                PLAINTEXT_LENGTH + 1,
	                                (unsigned char*)_key,
	                                strlen(_key)) << 1;
	/* negative result means invalid input; fall back to what converted */
	if (saved_key_length < 0)
		saved_key_length = strlen16(saved_key);
#endif
}

// UTF-8 to UCS-2, directly into vector key buffer
static void set_key_utf8(char *_key, int index)
{
#ifdef MMX_COEF
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	/* decode two UTF-8 code points per iteration (chl = low half-word,
	   chh = high half-word of the packed 32-bit store) */
	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			unsigned int extraBytesToRead =
				opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
		if (chl > UNI_MAX_BMP) {
			/* non-BMP code point: emit a UTF-16 surrogate pair */
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += MMX_COEF;
				break;
			}
			#define halfBase 0x0010000UL
			#define halfShift 10
			#define halfMask 0x3FFUL
			#define UNI_SUR_HIGH_START  (UTF32)0xD800
			#define UNI_SUR_LOW_START   (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);;
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else if (*source && len < PLAINTEXT_LENGTH) {
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
				case 3:
					++source;
					if (*source) {
						/* NOTE(review): 'chl' here looks like a
						   copy-paste from the first switch and
						   probably should be 'chh'; 4-byte
						   sequences for the second character are
						   not surrogate-split in this branch
						   either -- confirm against upstream. */
						chl <<= 6;
						chl += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* odd length: pad the high half-word with 0x80 */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += MMX_COEF;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += MMX_COEF;
	}
	/* even length (or empty key): the pad byte was not stored yet */
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += MMX_COEF;
	}

bailout:
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += MMX_COEF;
	}
	((unsigned int *)saved_key)[14*MMX_COEF + (index&3) +
	                            (index>>2)*16*MMX_COEF] = len << 4;
#else
	saved_key_length = utf8_to_utf16((UTF16*)&saved_key,
	                                 PLAINTEXT_LENGTH + 1,
	                                 (unsigned char*)_key,
	                                 strlen(_key)) << 1;
	if (saved_key_length < 0)
		saved_key_length = strlen16(saved_key);
#endif
}

static char *get_key(int index)
{
#ifdef MMX_COEF
	// Get the key back from the key buffer, from UCS-2
	unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)];
	static UTF16 key[PLAINTEXT_LENGTH + 1];
	unsigned int md5_size=0;
	unsigned int i=0;

	/* unpack two UCS-2 chars per 32-bit word until the 0x80 pad byte */
	for(; md5_size < PLAINTEXT_LENGTH; i += MMX_COEF, md5_size++)
	{
		key[md5_size] = keybuffer[i];
		key[md5_size+1] = keybuffer[i] >> 16;
		if (key[md5_size] == 0x80 && key[md5_size+1] == 0) {
			key[md5_size] = 0;
			break;
		}
		++md5_size;
		if (key[md5_size] == 0x80 &&
		    ((keybuffer[i+MMX_COEF]&0xFFFF) == 0 ||
		     md5_size == PLAINTEXT_LENGTH)) {
			key[md5_size] = 0;
			break;
		}
	}
	return (char*)utf16_to_enc(key);
#else
	return (char*)utf16_to_enc(saved_key);
#endif
}

static int cmp_all(void *binary, int count) {
#ifdef MMX_COEF
	unsigned int x,y=0;

	/* compare only the first 32-bit word against every SIMD lane */
	for(;y<MD5_SSE_PARA*BLOCK_LOOPS;y++)
		for(x=0;x<MMX_COEF;x++)
		{
			if( ((ARCH_WORD_32*)binary)[0] ==
			    ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] )
				return 1;
		}
	return 0;
#else
	return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}

/* Full hash already verified in cmp_one(); nothing more to check. */
static int cmp_exact(char *source, int count){
	return (1);
}

static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
	unsigned int x,y;
	x = index&3;
	y = index/4;

	if( ((ARCH_WORD_32*)binary)[0] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] )
		return 0;
	if( ((ARCH_WORD_32*)binary)[1] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4+MMX_COEF] )
		return 0;
	if( ((ARCH_WORD_32*)binary)[2] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4+2*MMX_COEF] )
		return 0;
	if( ((ARCH_WORD_32*)binary)[3] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4+3*MMX_COEF] )
		return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
#if defined(MMX_COEF)
#if (BLOCK_LOOPS > 1)
	int i;

	// This was an experiment. It's not used (unless you bump BLOCK_LOOPS),
	// cause it does not scale well. We would need to parallelize set_key()
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < BLOCK_LOOPS; i++)
		SSEmd5body(&saved_key[i*NBKEYS*64],
		           (unsigned int*)&crypt_key[i*NBKEYS*BINARY_SIZE],
		           NULL, SSEi_MIXED_IN);
#else
	SSEmd5body(saved_key, (unsigned int*)crypt_key, NULL, SSEi_MIXED_IN);
#endif
#else
	MD5_Init( &ctx );
	MD5_Update(&ctx, (unsigned char*)saved_key, saved_key_length);
	MD5_Final((unsigned char*) crypt_key, &ctx);
#endif
	return count;
}

#ifdef MMX_COEF
/* get_hash_N: return the low 4..27 bits of the first crypt word for the
   given SIMD lane (x = lane within vector, y = vector index). */
static int get_hash_0(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xf;
}
static int get_hash_1(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xff;
}
static int get_hash_2(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xfff;
}
static int get_hash_3(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xffff;
}
static int get_hash_4(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xfffff;
}
static int get_hash_5(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xffffff;
}
static int get_hash_6(int index)
{
	unsigned int x,y;
	x = index&3;
	y = index/4;
	return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0x7ffffff;
}
#else
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0x7ffffff; }
#endif

struct fmt_main fmt_rawmd5uthick = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
/* NOTE(review): 'SSE_MD5_PARA' looks like a typo for 'MD5_SSE_PARA' (the
   macro used everywhere else in this file); harmless while BLOCK_LOOPS is 1,
   but it would keep FMT_OMP unset if BLOCK_LOOPS were bumped -- confirm. */
#if (BLOCK_LOOPS > 1) && defined(SSE_MD5_PARA)
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
ex2.c
#include <stdio.h>
#include <omp.h>

/*
 * Prints a greeting from every OpenMP thread.
 *
 * Fix: `threadId` was declared before the parallel region, which makes it
 * shared among all threads (default data-sharing rules).  Every thread
 * then raced on the same variable, so a thread could print another
 * thread's id (or a torn value).  Declaring the variable inside the
 * region makes it private to each thread, eliminating the race.
 */
int main(void)
{
    #pragma omp parallel
    {
        /* private per thread: declared inside the parallel region */
        int threadId = omp_get_thread_num();

        printf("\nOi %d\n", threadId);
    }

    return 0;
}
convolution_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive int8 convolution for pack8 input -> pack1 output: each input element
// is a group of 8 int8 channels, each output element a single int32 sum.
// NOTE(review): despite the "_neon" suffix this body uses SSE2 intrinsics
// (__m128i) — presumably the x86 fallback of a NEON-named kernel; confirm
// against the surrounding build configuration.
static void convolution_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: for each of the maxk taps, the element offset into a
    // bottom_blob row block, accounting for dilation in both axes.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap; // jump to the start of the next dilated kernel row
        }
    }

    // num_output: one independent output channel per loop iteration
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                int sum = 0;

                const signed char* kptr = weight_data_int8.channel(p);

                // channels: accumulate over every pack8 input channel group
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 8 int8 lanes per spatial position (pack8 layout)
                    const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        // TODO use _mm_cvtepi8_epi16 on sse4.1
                        // Sign-extend 8 int8 values to int16: interleave with a
                        // mask that is 0xFF exactly where the byte is negative.
                        __m128i _val = _mm_loadl_epi64((const __m128i*)(sptr + space_ofs[k] * 8));
                        _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));

                        __m128i _w = _mm_loadl_epi64((const __m128i*)kptr);
                        _w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));

                        // 16x16 -> 32-bit products via low/high halves, then
                        // re-interleave into two vectors of full int32 products.
                        __m128i _sl = _mm_mullo_epi16(_val, _w);
                        __m128i _sh = _mm_mulhi_epi16(_val, _w);
                        __m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
                        __m128i _s1 = _mm_unpackhi_epi16(_sl, _sh);

                        __m128i _s4 = _mm_add_epi32(_s0, _s1);

                        // TODO use _mm_hadd_epi32 on ssse3
                        int s4[4];
                        _mm_storeu_si128((__m128i*)s4, _s4);
                        sum += s4[0] + s4[1] + s4[2] + s4[3]; // dot

                        kptr += 8; // next tap's 8 weights
                    }
                }

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
DRB005-indirectaccess1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* This program is extracted from a real application at LLNL. Two pointers (xa1 and xa2) have a pair of values with a distance of 12. They are used as start base addresses for two 1-D arrays. Their index set has two indices with distance of 12: 999 +12 = 1011. So there is loop carried dependence. However, having loop carried dependence does not mean data races will always happen. The iterations with loop carried dependence must be scheduled to different threads in order for data races to happen. In this example, we use schedule(static,1) to increase the chance that the dependent loop iterations will be scheduled to different threads. Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5 */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #define N 180 #include <omp.h> int indexSet[180] = {(521), (523), (525), (527), (529), (531), (547), (549), (551), (553), (555), (557), (573), (575), (577), (579), (581), (583), (599), (601), (603), (605), (607), (609), (625), (627), (629), (631), (633), (635), (651), (653), (655), (657), (659), (661), (859), (861), (863), (865), (867), (869), (885), (887), (889), (891), (893), (895), (911), (913), (915), (917), (919), (923), (937), (939), (941), (943), (945), (947), (963), (965), (967), (969), (971), (973), (989), (991), (993), (995), (997), (999), (1197), (1199), (1201), (1203), (1205), (1207), (1223), (1225), (1227), (1229), (1231), (1233), (1249), (1251), (1253), (1255), (1257), (1259), (1275), (1277), (1279), (1281), (1283), (1285), (1301), (1303), (1305), (1307), (1309), (1311), (1327), (1329), (1331), (1333), (1335), (1337), (1535), (1537), (1539), (1541), (1543), (1545), (1561), (1563), (1565), (1567), (1569), (1571), (1587), (1589), (1591), (1593), (1595), (1597), (1613), (1615), (1617), (1619), (1621), (1623), (1639), (1641), (1643), (1645), (1647), (1649), (1665), (1667), (1669), (1671), (1673), (1675), (1873), (1875), (1877), (1879), (1881), (1883), (1899), (1901), (1903), (1905), (1907), (1909), (1925), 
(1927), (1929), (1931), (1933), (1935), (1951), (1953), (1955), (1957), (1959), (1961), (1977), (1979), (1981), (1983), (1985), (1987), (2003), (2005), (2007), (2009), (2011), (2013) // change original 921 to 923 = 911+12 }; int main(int argc,char *argv[]) { // max index value is 2013. +12 to obtain a valid xa2[idx] after xa1+12. // +1 to ensure a reference like base[2015] is within the bound. double *base = (double *)(malloc(sizeof(double ) * (2013 + 12 + 1))); if (base == 0) { printf("Error in malloc(). Aborting ...\n"); return 1; } double *xa1 = base; double *xa2 = xa1 + 12; int i; // initialize segments touched by indexSet #pragma omp parallel for private (i) for (i = 521; i <= 2025; i += 1) { base[i] = 0.5 * i; } // default static even scheduling may not trigger data race, using static,1 instead. for (i = 0; i <= 179; i += 1) { int idx = indexSet[i]; xa1[idx] += 1.0 + i; xa2[idx] += 3.0 + i; } printf("x1[999]=%lf xa2[1285]=%lf\n",xa1[999],xa2[1285]); free(base); return 0; }
simd-3.c
/* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */

/* GCC execution test for `#pragma omp simd` with aligned/linear/reduction/
   lastprivate clauses.  The expected constants in main() encode the exact
   sequential semantics of the clauses; do not change any arithmetic here. */

extern void abort ();
int a[1024] __attribute__((aligned (32))) = { 1 };
int b[1024] __attribute__((aligned (32))) = { 1 };
unsigned char c[1024] __attribute__((aligned (32))) = { 1 };
int k, m;
__UINTPTR_TYPE__ u, u2, u3;

/* Two simd loops over fixed halves [0,512) and [512,1024):
   k is linear with step m+1, s/s2 are reductions, t2/t (and the pointer
   snapshots u,u2,u3) are lastprivate.  noinline/noclone keeps the call
   boundary so the vectorized body is actually exercised. */
__attribute__((noinline, noclone)) int
foo (int *p)
{
  int i, s = 0, s2 = 0, t, t2;
  #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s) \
	      lastprivate (t2)
  for (i = 0; i < 512; i++)
    {
      a[i] *= p[i];
      t2 = k + p[i];
      k += m + 1;
      s += p[i] + k;
      c[i]++;
    }
  #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s2) \
	      lastprivate (t, u, u2, u3)
  for (i = 512; i < 1024; i++)
    {
      a[i] *= p[i];
      k += m + 1;
      t = k + p[i];
      u = (__UINTPTR_TYPE__) &k;
      u2 = (__UINTPTR_TYPE__) &s2;
      u3 = (__UINTPTR_TYPE__) &t;
      s2 += t;
      c[i]++;
    }
  return s + s2 + t + t2;
}

/* Same shape as foo() but with runtime loop bounds n and o, so the
   vectorizer cannot rely on compile-time trip counts. */
__attribute__((noinline, noclone)) long int
bar (int *p, long int n, long int o)
{
  long int i, s = 0, s2 = 0, t, t2;
  #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s) \
	      lastprivate (t2)
  for (i = 0; i < n; i++)
    {
      a[i] *= p[i];
      t2 = k + p[i];
      k += m + 1;
      s += p[i] + k;
      c[i]++;
    }
  #pragma omp simd aligned(a, b, p : 32) linear(k: m + 1) reduction(+:s2) \
	      lastprivate (t, u, u2, u3)
  for (i = n; i < o; i++)
    {
      a[i] *= p[i];
      k += m + 1;
      t = k + p[i];
      u = (__UINTPTR_TYPE__) &k;
      u2 = (__UINTPTR_TYPE__) &s2;
      u3 = (__UINTPTR_TYPE__) &t;
      s2 += t;
      c[i]++;
    }
  return s + s2 + t + t2;
}

int
main ()
{
#if __SIZEOF_INT__ >= 4
  int i;
  k = 4;
  m = 2;
  /* Seed the arrays; c[] counts how many loop passes touched each index. */
  for (i = 0; i < 1024; i++)
    {
      a[i] = i - 512;
      b[i] = (i - 51) % 39;
      c[i] = (unsigned char) i;
    }
  int s = foo (b);
  /* After foo: b unchanged, a scaled once, every c[i] bumped once. */
  for (i = 0; i < 1024; i++)
    {
      if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]
	  || c[i] != (unsigned char) (i + 1))
	abort ();
      a[i] = i - 512;
    }
  /* k advanced by (m+1)=3 per iteration; s matches the sequential sum. */
  if (k != 4 + 3 * 1024
      || s != 1596127 + (4 + 3 * 511 + b[511]) + (4 + 3 * 1024 + b[1023]))
    abort ();
  k = 4;
  s = bar (b, 512, 1024);  /* runtime bounds equal to foo's: same result */
  for (i = 0; i < 1024; i++)
    {
      if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]
	  || c[i] != (unsigned char) (i + 2))
	abort ();
      a[i] = i - 512;
    }
  if (k != 4 + 3 * 1024
      || s != 1596127 + (4 + 3 * 511 + b[511]) + (4 + 3 * 1024 + b[1023]))
    abort ();
  k = 4;
  s = bar (b, 511, 1021);  /* non-multiple-of-vector bounds: tail handling */
  for (i = 0; i < 1021; i++)
    {
      if (b[i] != (i - 51) % 39 || a[i] != (i - 512) * b[i]
	  || c[i] != (unsigned char) (i + 3))
	abort ();
      a[i] = i - 512;
    }
  /* Indices beyond o==1021 must be untouched by the last call. */
  for (i = 1021; i < 1024; i++)
    if (b[i] != (i - 51) % 39 || a[i] != i - 512
	|| c[i] != (unsigned char) (i + 2))
      abort ();
  if (k != 4 + 3 * 1021
      || s != 1586803 + (4 + 3 * 510 + b[510]) + (4 + 3 * 1021 + b[1020]))
    abort ();
#endif
  return 0;
}
bench.c
#include "omp.h"
#include "pmsis.h"

/* Micro-benchmark of OpenMP constructs (barrier, critical, parallel-for,
   single) on a PMSIS/GAP cluster, costed in hardware cycles via the
   pi_perf counters. */

#define LOOP_ITER (2048)
#define NB_ITER (256)
#define NB_BARRIER_ITER (256)
#define NB_ITER_SINGLE (128)

#define CORE_ID pi_core_id()

/* Debug tracing, compiled out by default. */
#define PRINTF(...)
//#define PRINTF(...) printf(__VA_ARGS__)

/* Arm the cluster cycle counter and start counting. */
static void test_start_timer()
{
    pi_perf_cl_reset();
    pi_perf_conf(1<<PI_PERF_CYCLES);
    pi_perf_cl_start();
}

static void test_reset_timer()
{
    pi_perf_cl_reset();
}

/* Current cycle count since test_start_timer(). */
static unsigned int test_get_time()
{
    return pi_perf_cl_read(PI_PERF_CYCLES);
}

/* Returns 0: the "start" value is a placeholder; elapsed time is read
   directly from the hardware counter in getTimer(). */
static inline unsigned int startTimer()
{
    PRINTF("Starting timer\n");
    test_reset_timer();
    test_start_timer();
    return 0;
}

static inline unsigned int getTimer(unsigned int start)
{
    PRINTF("Ending timer\n");
    return test_get_time();
}

/* Average cost of one `#pragma omp barrier` across nthreads threads.
   Only thread 0 starts/reads the timer; `start` stays uninitialized on the
   other threads but is never read there. */
void test_barrier(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads) shared(nthreads)
    {
        unsigned int start;
        int i;
        float operation_cost = 0;
        if (omp_get_thread_num() == 0)
        {
            start = startTimer();
        }
        for (i = 0; i < NB_BARRIER_ITER; i++)
        {
#pragma omp barrier
        }
        if (omp_get_thread_num() == 0)
        {
            unsigned int end = getTimer(start);
            operation_cost = (float) end / NB_BARRIER_ITER;
            /* NOTE(review): %d with an unsigned nthreads — benign for small
               counts, but %u would be the matching specifier. */
            printf("BARRIER %d threads: %f cycles\n", nthreads, operation_cost);
        }
    }
}

/* Average cost of entering/leaving an empty `#pragma omp critical`. */
void test_critical(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads)
    {
        int i;
        unsigned int start = startTimer();
        float operation_cost = 0;
        for (i = 0; i < NB_ITER; i++)
        {
#pragma omp critical
            {
                volatile int a = 0; /* volatile: keep the body from folding away */
            }
        }
#pragma omp barrier
        operation_cost = (float) getTimer(start) / NB_ITER;
        if (CORE_ID == 0)
        {
            printf("CRITICAL %d threads: %.3f cycles\n", nthreads, operation_cost);
        }
    }
}

/* Per-iteration cost of a statically scheduled parallel for, including the
   fork/join overhead of re-entering the region NB_ITER times. */
void test_parallel_loop_static(unsigned int nthreads)
{
    int i;
    int j;
    unsigned int start = startTimer();
    float iteration_cost = 0;
    for (i = 0; i < NB_ITER; i++)
    {
#pragma omp parallel for num_threads(nthreads)
        for (j = 0; j < LOOP_ITER; j++)
        {
            volatile int a = j;
        }
    }
    iteration_cost = ((float) getTimer(start)/(NB_ITER * LOOP_ITER));
    printf("PARALLEL FOR %d threads STATIC %d iter: %.3f cycle(s) per iteration\n",
           nthreads, LOOP_ITER, iteration_cost);
}

/* Cost of an empty `#pragma omp single` executed NB_ITER times.
   NOTE(review): the divisor uses NB_ITER * LOOP_ITER although the loop only
   performs NB_ITER single regions — presumably copied from the loop test;
   verify the intended normalization before trusting the printed number. */
void test_parallel_single(unsigned int nthreads)
{
#pragma omp parallel num_threads(nthreads)
    {
        int i;
        int j;
        unsigned int start = startTimer();
        float iteration_cost = 0;
        for (i = 0; i < NB_ITER; i++)
        {
#pragma omp single
            {
                volatile int a = 0;
            }
        }
        if (omp_get_thread_num() == 0)
        {
            iteration_cost = ((float) getTimer(start)/(NB_ITER * LOOP_ITER));
            printf("PARALLEL SINGLE %d threads STATIC %d iter: %.3f cycle(s) per iteration\n", nthreads, LOOP_ITER, iteration_cost);
        }
    }
}

/* Cluster-side entry: sweep every benchmark over 1..N cluster cores. */
void test_entry()
{
    for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
    {
        test_barrier(i);
    }
    printf("\n");
    for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
    {
        test_critical(i);
    }
    printf("\n");
    for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
    {
        test_parallel_loop_static (i);
    }
    printf("\n");
    for (uint32_t i = 1; i <= pi_cl_cluster_nb_pe_cores(); i++)
    {
        test_parallel_single(i);
    }
}

/* Fabric-controller side: open the cluster, dispatch test_entry as a
   cluster task, then close and exit. */
void launch_test(void)
{
    printf("Entering main controller\n");
    uint32_t errors = 0;
    uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();

    struct pi_device cluster_dev;
    struct pi_cluster_conf cl_conf;

    /* Init cluster configuration structure. */
    pi_cluster_conf_init(&cl_conf);
    cl_conf.id = 0;                /* Set cluster ID. */
    /* Configure & open cluster. */
    pi_open_from_conf(&cluster_dev, &cl_conf);
    if (pi_cluster_open(&cluster_dev))
    {
        printf("Cluster open failed !\n");
        pmsis_exit(-1);
    }

    /* Prepare cluster task and send it to cluster. */
    struct pi_cluster_task cl_task;

    pi_cluster_task(&cl_task, test_entry, NULL);
    pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);

    pi_cluster_close(&cluster_dev);

    printf("Test success !\n");

    pmsis_exit(errors);
}

/* Program Entry. */
int main(void)
{
    printf("\n\n\t *** OpenMP Benchmark ***\n\n");
    return pmsis_kickoff((void *) launch_test);
}
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % Cristy % % July 2009 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/fourier.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { PixelChannel channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p l e x I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ComplexImages() performs complex mathematics on an image sequence. % % The format of the ComplexImages method is: % % MagickBooleanType ComplexImages(Image *images,const ComplexOperator op, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o op: A complex operator. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op,
  ExceptionInfo *exception)
{
#define ComplexImageTag  "Complex/Image"

  CacheView
    *Ai_view,
    *Ar_view,
    *Bi_view,
    *Br_view,
    *Ci_view,
    *Cr_view;

  const char
    *artifact;

  const Image
    *Ai_image,
    *Ar_image,
    *Bi_image,
    *Br_image;

  double
    snr;

  Image
    *Ci_image,
    *complex_images,
    *Cr_image,
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    At least two frames are required: A = (images, images->next).
  */
  if (images->next == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Result C is a fresh two-image list (real part, imaginary part).
  */
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return(image);
    }
  image->depth=32UL;
  complex_images=NewImageList();
  AppendImageToList(&complex_images,image);
  image=CloneImage(images,0,0,MagickTrue,exception);
  if (image == (Image *) NULL)
    {
      complex_images=DestroyImageList(complex_images);
      return(complex_images);
    }
  AppendImageToList(&complex_images,image);
  /*
    Apply complex mathematics to image pixels.
  */
  artifact=GetImageArtifact(image,"complex:snr");
  snr=0.0;
  if (artifact != (const char *) NULL)
    snr=StringToDouble(artifact,(char **) NULL);
  /*
    Operand A = frames 1,2; operand B defaults to A unless frames 3,4 exist.
  */
  Ar_image=images;
  Ai_image=images->next;
  Br_image=images;
  Bi_image=images->next;
  if ((images->next->next != (Image *) NULL) &&
      (images->next->next->next != (Image *) NULL))
    {
      Br_image=images->next->next;
      Bi_image=images->next->next->next;
    }
  Cr_image=complex_images;
  Ci_image=complex_images->next;
  Ar_view=AcquireVirtualCacheView(Ar_image,exception);
  Ai_view=AcquireVirtualCacheView(Ai_image,exception);
  Br_view=AcquireVirtualCacheView(Br_image,exception);
  Bi_view=AcquireVirtualCacheView(Bi_image,exception);
  Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
  Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(images,complex_images,images->rows,1L)
#endif
  for (y=0; y < (ssize_t) images->rows; y++)
  {
    register const Quantum
      *magick_restrict Ai,
      *magick_restrict Ar,
      *magick_restrict Bi,
      *magick_restrict Br;

    register Quantum
      *magick_restrict Ci,
      *magick_restrict Cr;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip the work */
    Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Ar_image->columns,1,exception);
    Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Ai_image->columns,1,exception);
    Br=GetCacheViewVirtualPixels(Br_view,0,y,Br_image->columns,1,exception);
    Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Bi_image->columns,1,exception);
    Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception);
    Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception);
    if ((Ar == (const Quantum *) NULL) || (Ai == (const Quantum *) NULL) ||
        (Br == (const Quantum *) NULL) || (Bi == (const Quantum *) NULL) ||
        (Cr == (Quantum *) NULL) || (Ci == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) images->columns; x++)
    {
      register ssize_t
        i;

      /*
        Apply the operator channel-by-channel: C = A op B.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(images); i++)
      {
        switch (op)
        {
          case AddComplexOperator:
          {
            Cr[i]=Ar[i]+Br[i];
            Ci[i]=Ai[i]+Bi[i];
            break;
          }
          case ConjugateComplexOperator:
          default:
          {
            Cr[i]=Ar[i];
            Ci[i]=(-Bi[i]);
            break;
          }
          case DivideComplexOperator:
          {
            double
              gamma;

            /* snr regularizes the denominator against divide-by-zero */
            gamma=PerceptibleReciprocal(Br[i]*Br[i]+Bi[i]*Bi[i]+snr);
            Cr[i]=gamma*(Ar[i]*Br[i]+Ai[i]*Bi[i]);
            Ci[i]=gamma*(Ai[i]*Br[i]-Ar[i]*Bi[i]);
            break;
          }
          case MagnitudePhaseComplexOperator:
          {
            Cr[i]=sqrt(Ar[i]*Ar[i]+Ai[i]*Ai[i]);
            /* phase mapped from [-pi,pi] into [0,1] */
            Ci[i]=atan2(Ai[i],Ar[i])/(2.0*MagickPI)+0.5;
            break;
          }
          case MultiplyComplexOperator:
          {
            Cr[i]=QuantumScale*(Ar[i]*Br[i]-Ai[i]*Bi[i]);
            Ci[i]=QuantumScale*(Ai[i]*Br[i]+Ar[i]*Bi[i]);
            break;
          }
          case RealImaginaryComplexOperator:
          {
            Cr[i]=Ar[i]*cos(2.0*MagickPI*(Ai[i]-0.5));
            Ci[i]=Ar[i]*sin(2.0*MagickPI*(Ai[i]-0.5));
            break;
          }
          case SubtractComplexOperator:
          {
            Cr[i]=Ar[i]-Br[i];
            Ci[i]=Ai[i]-Bi[i];
            break;
          }
        }
      }
      Ar+=GetPixelChannels(Ar_image);
      Ai+=GetPixelChannels(Ai_image);
      Br+=GetPixelChannels(Br_image);
      Bi+=GetPixelChannels(Bi_image);
      Cr+=GetPixelChannels(Cr_image);
      Ci+=GetPixelChannels(Ci_image);
    }
    if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  Cr_view=DestroyCacheView(Cr_view);
  Ci_view=DestroyCacheView(Ci_view);
  Br_view=DestroyCacheView(Br_view);
  Bi_view=DestroyCacheView(Bi_view);
  Ar_view=DestroyCacheView(Ar_view);
  Ai_view=DestroyCacheView(Ai_view);
  if (status == MagickFalse)
    complex_images=DestroyImageList(complex_images);
  return(complex_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F o r w a r d F o u r i e r T r a n s f o r m I m a g e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ForwardFourierTransformImage() implements the discrete Fourier transform
%  (DFT) of the image either as a magnitude / phase or real / imaginary image
%  pair.
%
%  The format of the ForwardFourierTransformImage method is:
%
%      Image *ForwardFourierTransformImage(const Image *image,
%        const MagickBooleanType modulus,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulus: if true, return as transform as a magnitude / phase pair
%      otherwise a real / imaginary image pair.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(MAGICKCORE_FFTW_DELEGATE)

/*
  Circularly shift a width x height double buffer by (x_offset,y_offset),
  wrapping indices at the edges, in place via a scratch copy.
*/
static MagickBooleanType RollFourier(const size_t width,const size_t height,
  const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
  double
    *source_pixels;

  MemoryInfo
    *source_info;

  register ssize_t
    i,
    x;

  ssize_t
    u,
    v,
    y;

  /*
    Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
  */
  source_info=AcquireVirtualMemory(width,height*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    return(MagickFalse);
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  i=0L;
  for (y=0L; y < (ssize_t) height; y++)
  {
    if (y_offset < 0L)
      v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
    else
      v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
        y+y_offset;
    for (x=0L; x < (ssize_t) width; x++)
    {
      if (x_offset < 0L)
        u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
      else
        u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
          x+x_offset;
      source_pixels[v*width+u]=roll_pixels[i++];
    }
  }
  (void) memcpy(roll_pixels,source_pixels,height*width*
    sizeof(*source_pixels));
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}

/*
  Expand FFTW's half-width (r2c) layout in source_pixels into a full
  width x height plane in forward_pixels, mirroring the redundant half
  by conjugate symmetry and centering the DC term.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) (width/2L)+1L;
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[width/2L-x-1L]=source_pixels[x+1L];
  return(MagickTrue);
}

/*
  Negate the left half-plane so the phase convention matches the mirrored
  (conjugate) half produced by ForwardQuadrantSwap().
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  register ssize_t
    x;

  ssize_t
    y;

  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L); x++)
      fourier_pixels[y*width+x]*=(-1.0);
}

/*
  Write the magnitude and phase (or real/imaginary) planes into the two
  output frames of `image` for the channel selected in fourier_info.
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register Quantum
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->width,
    fourier_info->height*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) memset(magnitude_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) memset(phase_pixels,0,fourier_info->width*
    fourier_info->height*sizeof(*phase_pixels));
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /*
        Rescale phase from [-pi,pi] into [0,1] for quantum storage.
      */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* store only the channel this pass is transforming */
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(magnitude_image,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(magnitude_image);
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->width,1UL,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedPixelChannel:
        default:
        {
          SetPixelRed(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case GreenPixelChannel:
        {
          SetPixelGreen(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BluePixelChannel:
        {
          SetPixelBlue(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case BlackPixelChannel:
        {
          SetPixelBlack(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
        case AlphaPixelChannel:
        {
          SetPixelAlpha(phase_image,ClampToQuantum(QuantumRange*
            phase_pixels[i]),q);
          break;
        }
      }
      i++;
      q+=GetPixelChannels(phase_image);
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}

static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
CacheView *image_view; const char *value; double *source_pixels; fftw_complex *forward_pixels; fftw_plan fftw_r2c_plan; MemoryInfo *forward_info, *source_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); memset(source_pixels,0,fourier_info->width*fourier_info->height* sizeof(*source_pixels)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { source_pixels[i]=QuantumScale*GetPixelRed(image,p); break; } case GreenPixelChannel: { source_pixels[i]=QuantumScale*GetPixelGreen(image,p); break; } case BluePixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlue(image,p); break; } case BlackPixelChannel: { source_pixels[i]=QuantumScale*GetPixelBlack(image,p); break; } case AlphaPixelChannel: { source_pixels[i]=QuantumScale*GetPixelAlpha(image,p); break; } } i++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); forward_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*forward_pixels)); if (forward_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); return(MagickFalse); } forward_pixels=(fftw_complex *) 
GetVirtualMemoryBlob(forward_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source_pixels,forward_pixels,FFTW_ESTIMATE); fftw_execute_dft_r2c(fftw_r2c_plan,source_pixels,forward_pixels); fftw_destroy_plan(fftw_r2c_plan); source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info); value=GetImageArtifact(image,"fourier:normalize"); if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0)) { double gamma; /* Normalize fourier transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) forward_pixels[i]*=gamma; #else forward_pixels[i][0]*=gamma; forward_pixels[i][1]*=gamma; #endif i++; } } /* Generate magnitude and phase (or real and imaginary). */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=cabs(forward_pixels[i]); phase_pixels[i]=carg(forward_pixels[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude_pixels[i]=creal(forward_pixels[i]); phase_pixels[i]=cimag(forward_pixels[i]); i++; } forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude_pixels, *phase_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *magnitude_info, *phase_info; fourier_info.width=image->columns; fourier_info.height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) 
|| ((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*phase_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL)) { if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (magnitude_info == (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels, phase_pixels,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels, phase_pixels,exception); phase_info=RelinquishVirtualMemory(phase_info); magnitude_info=RelinquishVirtualMemory(magnitude_info); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); #else { Image *magnitude_image; size_t height, width; width=image->columns; height=image->rows; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || 
((image->rows % 2) != 0)) { size_t extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? extent+1UL : extent; } height=width; magnitude_image=CloneImage(image,width,height,MagickTrue,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,height,MagickTrue,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsImageGray(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayPixelChannel,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; 
thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->alpha_trait != UndefinedPixelTrait) thread_status=ForwardFourierTransformChannel(image, AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. 
*/ center=(ssize_t) (width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[(height-y)*center-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[y*center]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image, fftw_complex *fourier_pixels,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *inverse_pixels, *magnitude_pixels, *phase_pixels; MagickBooleanType status; MemoryInfo *inverse_info, *magnitude_info, *phase_info; register const Quantum *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*magnitude_pixels)); phase_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*phase_pixels)); inverse_info=AcquireVirtualMemory((size_t) fourier_info->width, (fourier_info->height/2+1)*sizeof(*inverse_pixels)); if ((magnitude_info == (MemoryInfo *) NULL) || (phase_info == (MemoryInfo *) NULL) || (inverse_info == (MemoryInfo *) NULL)) { if (magnitude_info != (MemoryInfo *) NULL) magnitude_info=RelinquishVirtualMemory(magnitude_info); if (phase_info != (MemoryInfo *) NULL) phase_info=RelinquishVirtualMemory(phase_info); if (inverse_info != (MemoryInfo *) NULL) inverse_info=RelinquishVirtualMemory(inverse_info); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info); phase_pixels=(double *) GetVirtualMemoryBlob(phase_info); inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info); i=0L; 
magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { magnitude_pixels[i]=QuantumScale*GetPixelRed(magnitude_image,p); break; } case GreenPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelGreen(magnitude_image,p); break; } case BluePixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlue(magnitude_image,p); break; } case BlackPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelBlack(magnitude_image,p); break; } case AlphaPixelChannel: { magnitude_pixels[i]=QuantumScale*GetPixelAlpha(magnitude_image,p); break; } } i++; p+=GetPixelChannels(magnitude_image); } } magnitude_view=DestroyCacheView(magnitude_view); status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_pixels,inverse_pixels); (void) memcpy(magnitude_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*magnitude_pixels)); i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedPixelChannel: default: { phase_pixels[i]=QuantumScale*GetPixelRed(phase_image,p); break; } case GreenPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelGreen(phase_image,p); break; } case BluePixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlue(phase_image,p); break; } case BlackPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelBlack(phase_image,p); break; } case AlphaPixelChannel: { phase_pixels[i]=QuantumScale*GetPixelAlpha(phase_image,p); break; } } i++; p+=GetPixelChannels(phase_image); 
} } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_pixels[i]-=0.5; phase_pixels[i]*=(2.0*MagickPI); i++; } } phase_view=DestroyCacheView(phase_view); CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_pixels,inverse_pixels); (void) memcpy(phase_pixels,inverse_pixels,fourier_info->height* fourier_info->center*sizeof(*phase_pixels)); inverse_info=RelinquishVirtualMemory(inverse_info); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I* magnitude_pixels[i]*sin(phase_pixels[i]); #else fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]); fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i]; #else fourier_pixels[i][0]=magnitude_pixels[i]; fourier_pixels[i][1]=phase_pixels[i]; #endif i++; } magnitude_info=RelinquishVirtualMemory(magnitude_info); phase_info=RelinquishVirtualMemory(phase_info); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception) { CacheView *image_view; const char *value; double *source_pixels; fftw_plan fftw_c2r_plan; MemoryInfo *source_info; register Quantum *q; register ssize_t i, x; ssize_t y; source_info=AcquireVirtualMemory((size_t) fourier_info->width, fourier_info->height*sizeof(*source_pixels)); if (source_info == (MemoryInfo *) NULL) { (void) 
ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } source_pixels=(double *) GetVirtualMemoryBlob(source_info); value=GetImageArtifact(image,"fourier:normalize"); if (LocaleCompare(value,"inverse") == 0) { double gamma; /* Normalize inverse transform. */ i=0L; gamma=PerceptibleReciprocal((double) fourier_info->width* fourier_info->height); for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier_pixels[i]*=gamma; #else fourier_pixels[i][0]*=gamma; fourier_pixels[i][1]*=gamma; #endif i++; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier_pixels,source_pixels,FFTW_ESTIMATE); fftw_execute_dft_c2r(fftw_c2r_plan,fourier_pixels,source_pixels); fftw_destroy_plan(fftw_c2r_plan); i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? 
image->columns : fourier_info->width,1UL,exception); if (q == (Quantum *) NULL) break; for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedPixelChannel: default: { SetPixelRed(image,ClampToQuantum(QuantumRange*source_pixels[i]),q); break; } case GreenPixelChannel: { SetPixelGreen(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BluePixelChannel: { SetPixelBlue(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case BlackPixelChannel: { SetPixelBlack(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } case AlphaPixelChannel: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*source_pixels[i]), q); break; } } i++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source_info=RelinquishVirtualMemory(source_info); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const PixelChannel channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { fftw_complex *inverse_pixels; FourierInfo fourier_info; MagickBooleanType status; MemoryInfo *inverse_info; fourier_info.width=magnitude_image->columns; fourier_info.height=magnitude_image->rows; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { size_t extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) (fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; inverse_info=AcquireVirtualMemory((size_t) fourier_info.width, (fourier_info.height/2+1)*sizeof(*inverse_pixels)); if (inverse_info == (MemoryInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } inverse_pixels=(fftw_complex *) GetVirtualMemoryBlob(inverse_info); status=InverseFourier(&fourier_info,magnitude_image,phase_image, inverse_pixels,exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,inverse_pixels,fourier_image, exception); inverse_info=RelinquishVirtualMemory(inverse_info); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickCoreSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickTrue,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsImageGray(magnitude_image); if (is_gray != MagickFalse) 
is_gray=IsImageGray(phase_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayPixelChannel,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BluePixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlackPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->alpha_trait != UndefinedPixelTrait) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,AlphaPixelChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) 
fourier_image=DestroyImage(fourier_image); /* any failed channel voids result */
        }
      /* NOTE(review): fftw_cleanup() discards all FFTW internal state;
         assumes no other thread is concurrently using FFTW — confirm. */
      fftw_cleanup();
    }
#endif
  return(fourier_image);
}
/* ==================== file: process.h ==================== */
/* * @Author: Philippe Dales * @Date: 2018-07-26 14:26:23 * @Last Modified by: Philippe Dales * @Last Modified time: 2018-07-26 14:26:23 */ /* Signal processing functions. */ #ifndef PROCESS_H #define PROCESS_H #define _USE_MATH_DEFINES #include <cmath> #include <iostream> #include <vector> #include <array> #include <algorithm> #include <fftw3.h> // #include <queue> // #include <random> #include <thread> #include <functional> #include <chrono> // #include <omp.h> #include "xseis/structures.h" // typedef std::pair<float, std::array<float, 3> > vpair; // typedef std::priority_queue<vpair, std::vector<vpair>, std::greater<vpair>> fe_queue; namespace process { // template<typename T> // T max(T* begin, T* end) { // return *std::max_element(begin, end); // } // template<typename T> // T min(T* begin, T* end) { // return *std::min_element(begin, end); // } // template<typename T> // size_t argmax(T* begin, T* end) { // return std::distance(begin, std::max_element(begin, end)); // } template<typename Container> float max(Container& data) { return *std::max_element(data.begin(), data.end()); } template<typename Container> float min(Container& data) { return *std::min_element(data.begin(), data.end()); } // template<typename T> // T min(T* begin, T* end) { // return *std::min_element(begin, end); // } template<typename Container> size_t argmax(Container& data) { return std::distance(data.begin(), std::max_element(data.begin(), data.end())); } inline float AngleBetweenPoints(float* a, float*b) { return std::atan((a[1] - b[1]) / (a[0] - b[0])); // return std::atan2(a[1] - b[1], a[0] - b[0]); } inline float DistCartesian(float* a, float* b) { float dx = a[0] - b[0]; float dy = a[1] - b[1]; float dz = a[2] - b[2]; return std::sqrt(dx * dx + dy * dy + dz * dz); } inline float DistCartesian2D(float* a, float* b) { float dx = a[0] - b[0]; float dy = a[1] - b[1]; return std::sqrt(dx * dx + dy * dy); } float DistDiff(float* a, float* b, float* c) { return DistCartesian(a, c) - 
DistCartesian(b, c); }

// Floored (always non-negative) modulo: maps any int a into [0, n).
uint mod_floor(int a, int n) {
	return ((a % n) + n) % n;
}

// Build per-bin complex rotation factors implementing a circular time shift
// of nshift samples for a half-spectrum of nfreq bins.
Vector<fftwf_complex> BuildPhaseShiftVec(size_t const nfreq, int const nshift) {
	auto v = Vector<fftwf_complex>(nfreq);
	// std::vector<fftwf_complex> v(nfreq);
	float const fstep = 0.5 / (nfreq - 1);  // normalized frequency step
	float const factor = nshift * 2 * M_PI * fstep;

	for(size_t i = 0; i < nfreq; ++i) {
		v[i][0] = std::cos(i * factor);
		v[i][1] = std::sin(i * factor);
	}
	return v;
}

// Multiply sig1 by sig2 (x + yi)(u + vi) = (xu-yv) + (xv+yu)i
// x + yi = s1[0] + s1[1]i
// u + vi = s2[0] + s2[1]i
// In-place variant: product is written back into sig1.
#pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT)
void Convolve(fftwf_complex const* const sig2, fftwf_complex* const sig1, uint32_t const nfreq)
{
	float tmp;
	#pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT)
	for (uint32_t i = 0; i < nfreq; ++i){
		tmp = sig1[i][0] * sig2[i][0] - sig1[i][1] * sig2[i][1];
		sig1[i][1] = sig1[i][0] * sig2[i][1] + sig1[i][1] * sig2[i][0];
		sig1[i][0] = tmp;
	}
}

// Out-of-place variant: complex product of sig1 and sig2 written to out.
#pragma omp declare simd aligned(sig1, sig2, out:MEM_ALIGNMENT)
inline void Convolve(fftwf_complex const* const sig1, fftwf_complex const* const sig2, fftwf_complex* const out, uint32_t const nfreq)
{
	#pragma omp simd aligned(sig1, sig2, out:MEM_ALIGNMENT)
	for (uint32_t i = 0; i < nfreq; ++i){
		out[i][0] = sig1[i][0] * sig2[i][0] - sig1[i][1] * sig2[i][1];
		out[i][1] = sig1[i][0] * sig2[i][1] + sig1[i][1] * sig2[i][0];
	}
}

// Element-wise complex accumulation: stack += data.
#pragma omp declare simd aligned(data, stack:MEM_ALIGNMENT)
inline void Accumulate(fftwf_complex const* const data, fftwf_complex* const stack, uint32_t const npts)
{
	#pragma omp simd aligned(data, stack:MEM_ALIGNMENT)
	for(uint32_t i = 0; i < npts; ++i) {
		stack[i][0] += data[i][0];
		stack[i][1] += data[i][1];
	}
}

// Element-wise real accumulation: stack += data.
#pragma omp declare simd aligned(data, stack:MEM_ALIGNMENT)
inline void Accumulate(float const* const data, float* const stack, uint32_t const npts)
{
	#pragma omp simd aligned(data, stack:MEM_ALIGNMENT)
	for(uint32_t i = 0; i < npts; ++i) {
		stack[i] += data[i];
	}
}

#pragma omp declare simd
aligned(sig:MEM_ALIGNMENT) void Whiten(fftwf_complex* const sig, uint32_t const npts) { #pragma omp simd aligned(sig:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { float abs = std::sqrt(sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]); sig[i][0] /= abs; sig[i][1] /= abs; } } #pragma omp declare simd aligned(sig, out:MEM_ALIGNMENT) void Absolute(fftwf_complex const* const sig, float* out, uint32_t const npts) { #pragma omp simd aligned(sig, out:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { out[i] = std::sqrt(sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]); } } #pragma omp declare simd aligned(sig:MEM_ALIGNMENT) void Absolute(float* sig, uint32_t const npts) { #pragma omp simd aligned(sig:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { sig[i] = std::abs(sig[i]); } } // #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) // void Convolve(fftwf_complex* sig1, fftwf_complex* sig2, uint32_t const nfreq) // { // float tmp; // #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) // for (uint32_t i = 0; i < nfreq; ++i){ // tmp = sig1[i][0] * sig2[i][0] - sig1[i][1] * sig2[i][1]; // sig1[i][1] = sig1[i][0] * sig2[i][1] + sig1[i][1] * sig2[i][0]; // sig1[i][0] = tmp; // } // } // Cross-correlate complex signals, cc(f) = s1(f) x s2*(f) #pragma omp declare simd aligned(sig1, sig2, out:MEM_ALIGNMENT) void XCorr(fftwf_complex const* const sig1, fftwf_complex const* const sig2, fftwf_complex* const out, uint32_t const nfreq) { #pragma omp simd aligned(sig1, sig2, out:MEM_ALIGNMENT) for (uint32_t i = 0; i < nfreq; ++i){ out[i][0] = sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]; out[i][1] = sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]; } } #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) float DotProductEnergy(float const* const sig1, float const* const sig2, uint32_t const npts) { float result = 0; #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) for (uint32_t i = 0; i < npts; ++i){ // result += sig1[0] * sig2[0]; result += (sig1[0] * sig2[0]) 
* (sig1[0] * sig2[0]); } return result; } #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) float DotProduct(float const* const sig1, float const* const sig2, uint32_t const npts) { float result = 0; #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) for (uint32_t i = 0; i < npts; ++i){ result += sig1[0] * sig2[0]; } return result; } // // Cross-correlate signal pairs of fdata and output to fdata_cc // void XCorrPairs(Array2D<fftwf_complex>& fdata, Array2D<uint16_t>& ckeys, Array2D<fftwf_complex>& fdata_cc) // { // uint32_t nfreq = fdata.ncol_; // #pragma omp for // for (size_t i = 0; i < ckeys.nrow_; ++i) // { // // std::cout << "npair: " << i << '\n'; // XCorr(fdata.row(ckeys(i, 0)), fdata.row(ckeys(i, 1)), // fdata_cc.row(i), nfreq); // } // } // #pragma omp declare simd aligned(data:MEM_ALIGNMENT) template <typename T, typename F> void ApplyFuncToRows(T *__restrict__ data, size_t nsig, size_t npts, F* func){ // Generic map function // #pragma omp for simd aligned(data:MEM_ALIGNMENT) for (size_t i = 0; i < nsig; i++) { (*func)(data + (i * npts), npts); } } template <typename T, typename F> void ApplyFuncToRows(Array2D<T>& data, F* func){ ApplyFuncToRows(data.data_, data.nrow_, data.ncol_, func); } Vector<float> BuildFreqFilter(std::vector<float>& corner_freqs, uint nfreq, float sr) { float fsr = (nfreq * 2 - 1) / sr; // printf("nfreq: %u, FSR: %.4f\n", nfreq, fsr); std::vector<uint32_t> cx; for(auto&& cf : corner_freqs) { cx.push_back(static_cast<uint32_t>(cf * fsr + 0.5)); // printf("cf/fsr %.2f, %.5f\n", cf, fsr); } // printf("filt corner indexes \n"); // for(auto&& c : cx) { // // printf("cx/ cast: %.3f, %u\n", cx, (uint32_t)cx); // printf("--%u--", c); // } // printf("\n"); // whiten corners: cutmin--porte1---porte2--cutmax auto filter = Vector<float>(nfreq); filter.fill(0); // int wlen = porte1 - cutmin; float cosm_left = M_PI / (2. 
* (cx[1] - cx[0])); // left hand taper
  // Cosine-squared roll-on from 0 at corner cx[0] up to 1 at cx[1].
  for (uint i = cx[0]; i < cx[1]; ++i) {
    filter[i] = std::pow(std::cos((cx[1] - (i + 1) ) * cosm_left), 2.0);
  }
  // setin middle freqs amp = 1 (flat pass-band between the two inner corners)
  for (uint i = cx[1]; i < cx[2]; ++i) {
    filter[i] = 1;
  }
  float cosm_right = M_PI / (2. * (cx[3] - cx[2])); // right hand taper
  // Cosine-squared roll-off from 1 at cx[2] down to 0 at cx[3].
  for (uint i = cx[2]; i < cx[3]; ++i) {
    filter[i] = std::pow(std::cos((i - cx[2]) * cosm_right), 2.0);
  }
  return filter;
}

// Overwrite each complex spectrum bin with the filter amplitude while keeping
// the bin's original phase (amplitude := filter[i], phase := atan2(im, re)).
// NOTE(review): nfreq is unused; iteration is bounded by filter.size_ --
// confirm callers always pass a filter of length nfreq.
void ApplyFreqFilterReplace(float (*fdata)[2], uint const nfreq, Vector<float>& filter)
{
  float angle;
  for (uint i = 0; i < filter.size_; ++i) {
    if(filter[i] == 0) {
      fdata[i][0] = 0;
      fdata[i][1] = 0;
    }
    else {
      angle = std::atan2(fdata[i][1], fdata[i][0]);
      fdata[i][0] = filter[i] * std::cos(angle);
      fdata[i][1] = filter[i] * std::sin(angle);
    }
  }
}

// Scale each complex bin component-wise by filter[i] times cos/sin of the
// bin's phase.  NOTE(review): unlike a plain amplitude scaling this folds the
// phase into the multiplier (re *= f*cos, im *= f*sin) -- confirm intended.
void ApplyFreqFilterMultiply(float (*fdata)[2], uint nfreq, Vector<float>& filter)
{
  float angle;
  for (uint i = 0; i < filter.size_; ++i) {
    if(filter[i] == 0) {
      fdata[i][0] = 0;
      fdata[i][1] = 0;
    }
    else {
      angle = std::atan2(fdata[i][1], fdata[i][0]);
      fdata[i][0] *= filter[i] * std::cos(angle);
      fdata[i][1] *= filter[i] * std::sin(angle);
    }
  }
}

// In-place element-wise square.
void square_signal(float *sig, size_t npts)
{
  for (size_t i = 0; i < npts; ++i) {
    sig[i] = sig[i] * sig[i];
  }
}

// In-place element-wise square root (samples assumed non-negative).
void root_signal(float *sig, size_t npts)
{
  for (size_t i = 0; i < npts; ++i) {
    sig[i] = std::sqrt(sig[i]);
  }
}

// Root-mean-square energy of the signal.
float rms_energy(float *sig, size_t npts)
{
  // np.sqrt(np.mean(data ** 2, axis=axis))
  float square_sum = 0;
  for (size_t i = 0; i < npts; ++i){
    square_sum += sig[i] * sig[i];
  }
  return std::sqrt(square_sum / npts);
}

// Clamp every sample into [-thresh, thresh] in place.
void clip(float *sig, size_t npts, float thresh){
  for (size_t i = 0; i < npts; ++i){
    if (sig[i] > thresh){sig[i] = thresh;}
    else if (sig[i] < -thresh){sig[i] = -thresh;}
  }
}

// Remove the arithmetic mean from the signal (zero-mean output).
void demean(float *sig, size_t npts)
{
  float mean = 0;
  for (size_t i = 0; i < npts; ++i){
    mean += sig[i];
  }
  mean /= npts;
  for (size_t i = 0; i < npts; ++i){
    sig[i] -= mean;
  }
}

// One-bit normalization: replace each sample by its sign (-1, 0, or +1).
void norm_one_bit(float *sig, size_t npts)
{
  for (size_t i = 0; i < npts; ++i){
    sig[i] = (sig[i] > 0) - (sig[i] < 0);
  }
}

// Binarize the signal: positive samples -> 1, everything else -> 0.
void norm_one_or_zero(float *sig, size_t npts)
{
  for (size_t i = 0; i < npts; ++i){
    if(sig[i] <= 0) {
      sig[i] = 0;
    }
    else{
      sig[i] = 1;
    }
  }
}

// Exponential moving average of |sig| with alpha = 2/(wlen+1), in place.
// When both_ways is true a second backward pass is applied, which symmetrizes
// the smoothing (forward-backward filtering).
void ExpMovingAverage(float *sig, size_t npts, uint wlen, bool both_ways=false)
{
  float alpha = 2 / (static_cast<float>(wlen) + 1);
  float beta = 1 - alpha;
  sig[0] = std::abs(sig[0]);
  for (size_t i = 1; i < npts; ++i){
    sig[i] = alpha * std::abs(sig[i]) + beta * sig[i - 1];
  }
  if(both_ways == true) {
    for (long i = npts - 2; i >= 0; --i){
      sig[i] = alpha * std::abs(sig[i]) + beta * sig[i + 1];
    }
  }
}

// Same as ExpMovingAverage but without rectification (signed EMA).
void EMA_NoAbs(float *sig, size_t npts, uint wlen, bool both_ways=false)
{
  float alpha = 2 / (static_cast<float>(wlen) + 1);
  float beta = 1 - alpha;
  for (size_t i = 1; i < npts; ++i){
    sig[i] = alpha * sig[i] + beta * sig[i - 1];
  }
  if(both_ways == true) {
    for (long i = npts - 2; i >= 0; --i){
      sig[i] = alpha * sig[i] + beta * sig[i + 1];
    }
  }
}

// Median via std::nth_element.  NOTE: partially reorders sig in place; for an
// even npts this returns the upper of the two middle elements (no averaging).
float median(float *sig, size_t npts)
{
  size_t half = npts / 2;
  std::nth_element(sig, sig + half, sig + npts);
  return sig[half];
}

// void ExpMovingAverageSquare(float *sig, size_t npts, uint wlen)
// {
//   float alpha = 2 / (static_cast<float>(wlen) + 1);
//   float beta = 1 - alpha;
//   sig[0] = sig[0] * sig[0];
//   for (size_t i = 1; i < npts; ++i){
//     sig[i] = alpha * sig[i] * sig[i] + beta * sig[i - 1];
//   }
// }
// esig[i] = alpha * esig[i] + (1 - alpha) * esig[i - 1]

// Comparator on absolute value, used by norm_max_abs below.
// template<typename T>
// bool abs_compare(T a, T b)
bool abs_compare(float a, float b)
{
  return (std::abs(a) < std::abs(b));
}

// Normalize by the element with the largest |value| (no-op when that is 0).
void norm_max_abs(float *sig, size_t npts)
{
  float max = *std::max_element(sig, sig + npts, abs_compare);
  if (max != 0){
    for (size_t i = 0; i < npts; ++i){
      sig[i] /= max;
    }
  }
}

// Zero out a window of ~wlen samples centred on the global maximum, wrapping
// around the array ends when the window overruns either edge.
void zero_around_max(float *sig, size_t npts, size_t wlen)
{
  // size_t amax = std::distance(sig, std::max_element(sig, sig + npts));
  // size_t hlen = wlen / 2;
  // size_t cutmin = std::max(amax - hlen, (size_t) 0);
  // size_t cutmax = std::min(amax + hlen, (size_t) npts);
  // for(size_t i = cutmin; i < cutmax; ++i) {
  //   sig[i] = 0;
  // }
  long amax = std::distance(sig, std::max_element(sig, sig + npts));
  long hlen = wlen / 2;
  long cutmin = amax - hlen;
  long cutmax = amax + hlen;
  // NOTE(review): cutmax <= npts and i < cutmax mix long with size_t --
  // fine while npts fits in long, worth confirming for very large buffers.
  if(cutmin >= 0 && cutmax <= npts) {
    // window fully inside the array
    for(size_t i = cutmin; i < cutmax; ++i) {
      sig[i] = 0;
    }
  }
  else if (cutmin < 0){
    // window wraps past the start: zero tail end + head
    for(size_t i = npts + cutmin; i < npts; ++i) {
      sig[i] = 0;
    }
    for(size_t i = 0; i < cutmax; ++i) {
      sig[i] = 0;
    }
  }
  else if (cutmax > npts){
    // window wraps past the end: zero tail + wrapped head
    for(size_t i = cutmin; i < npts; ++i) {
      sig[i] = 0;
    }
    for(size_t i = 0; i < cutmax - npts; ++i) {
      sig[i] = 0;
    }
  }
}

// In-place absolute value (rectification).
void absolute(float *sig, size_t npts)
{
  for (size_t i = 0; i < npts; ++i){
    sig[i] = std::abs(sig[i]);
  }
}

// Circularly shift the signal left by nroll samples (std::rotate semantics).
void Roll(float* sig, size_t npts, long nroll)
{
  std::rotate(sig, sig + nroll, sig + npts);
}

// Apply a cosine (Hann-shaped) taper of len_taper samples to both ends of
// the signal, ramping from 0 at the edges up to 1.
void taper(float *sig, size_t npts, uint len_taper)
{
  float factor = (2 * M_PI) / ((len_taper * 2) - 1);
  float *sig_end = sig + npts - len_taper;
  for (size_t i = 0; i < len_taper; ++i) {
    sig[i] *= 0.5 - 0.5 * std::cos(i * factor);
  }
  for (size_t i = 0; i < len_taper; ++i) {
    sig_end[i] *= 0.5 - 0.5 * std::cos((i + len_taper) * factor);
  }
}

// Population standard deviation (divides by size, not size-1).
template<typename T>
float standard_deviation(T *data, size_t size)
{
  float mean = 0;
  for(size_t i = 0; i < size; ++i) {
    mean += data[i];
  }
  mean /= size;
  float var = 0;
  for(size_t i = 0; i < size; ++i) {
    var += (data[i] - mean) * (data[i] - mean);
  }
  return std::sqrt(var / size);
}

// Arithmetic mean of the buffer.
template<typename T>
float mean(T *data, size_t size)
{
  float mean = 0;
  for(size_t i = 0; i < size; ++i) {
    mean += data[i];
  }
  mean /= size;
  return mean;
}

// void norm_energy(float (*sig)[2], int npts)
// {
//   int nfreq = npts / 2 + 1;
//   float energy = 0;
//   for (int i = 0; i < nfreq; ++i) {
//     energy += (sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]);
//   }
//   // printf("energy = %.5f \n", energy);
//   // printf("nfreq = %d \n", nfreq);
//   for (int i = 0; i < nfreq; ++i) {
//     sig[i][0] /= energy;
//     sig[i][1] /= energy;
//   }
// }

// Sliding-window maximum of |sig|, treating the array as circular at the
// edges.  O(npts * wlen): a max_element scan per sample.
// NOTE(review): "float buf[wlen]" is a VLA, a compiler extension in C++ --
// confirm the build only targets GCC/Clang.
void SlidingWinMax(float *sig, size_t npts, size_t wlen)
{
  // Sliding window max abs val smoothin (horribly slow)
  absolute(sig, npts);
  if (wlen % 2 == 0){wlen += 1;}  // force an odd window so it has a centre
  size_t hlen = wlen / 2 + 1;
  float buf[wlen];
  size_t buf_idx = 0;
  // Fill buffer with last WLEN vals of sig
  std::copy(&sig[npts - wlen], &sig[npts], buf);
  // Handle edge case with index wrapin via mod function
  for (size_t i = npts - hlen; i < npts + hlen; ++i) {
    sig[i % npts] = *std::max_element(buf, buf + wlen);
    buf[buf_idx] = sig[(i + hlen) % npts];
    buf_idx = (buf_idx + 1) % wlen;
  }
  // handle non-edge case
  for (size_t i = hlen; i < npts - hlen; ++i) {
    sig[i] = *std::max_element(buf, buf + wlen);
    buf[buf_idx] = sig[i + hlen];
    buf_idx = (buf_idx + 1) % wlen;
  }
}

// Scale a real signal by a constant.
void Multiply(float *sig, size_t npts, float val){
  for (size_t i = 0; i < npts; ++i){
    sig[i] *= val;
  }
}

// Scale a complex (interleaved re/im) signal by a real constant.
void Multiply(fftwf_complex* data, size_t npts, float val)
{
  for(size_t i = 0; i < npts; ++i) {
    data[i][0] *= val;
    data[i][1] *= val;
  }
}

// Scale any container exposing data_/size_ (e.g. Vector<T>) by a constant.
template<typename Container>
void Multiply(Container& data, float val) {
  Multiply(data.data_, data.size_, val);
}

// Set both components of every complex bin to val.
void Fill(fftwf_complex* data, size_t npts, float val)
{
  for(size_t i = 0; i < npts; ++i) {
    data[i][0] = val;
    data[i][1] = val;
  }
}

// Set every real sample to val.
void Fill(float* data, size_t npts, float val)
{
  for(size_t i = 0; i < npts; ++i) {
    data[i] = val;
  }
}

// Vector<> overloads of the two fills above.
void Fill(Vector<fftwf_complex>& data, float val)
{
  for(size_t i = 0; i < data.size_; ++i) {
    data[i][0] = val;
    data[i][1] = val;
  }
}

void Fill(Vector<float>& data, float val)
{
  for(size_t i = 0; i < data.size_; ++i) {
    data[i] = val;
  }
}

// Copy npts complex bins (treats the re/im pairs as a flat float range).
void Copy(fftwf_complex const *in, size_t npts, fftwf_complex *out)
{
  std::copy(&(in)[0][0], &(in + npts)[0][0], &out[0][0]);
}

// Copy npts real samples.
void Copy(float const *in, size_t npts, float *out)
{
  std::copy(in, in + npts, out);
}

// In-place complex subtraction: data_mod[i] -= data[i].
void Subtract(fftwf_complex const *data, fftwf_complex *data_mod, size_t npts)
{
  for(size_t i = 0; i < npts; ++i) {
    data_mod[i][0] -= data[i][0];
    data_mod[i][1] -= data[i][1];
  }
}

// Mean spectral energy of a complex signal.
#pragma omp declare simd aligned(sig:MEM_ALIGNMENT)
float Energy(const fftwf_complex *sig, uint32_t const nfreq)
{
  // E = 1/N sum(|x(f)**2|)
  float tmp = 0;
#pragma omp simd aligned(sig:MEM_ALIGNMENT)
  for (uint32_t i = 0; i < nfreq; ++i){
    tmp += sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1];
  }
  return tmp / static_cast<float>(nfreq);
}

// Cross-correlate complex signals, cc(f) = s1(f) x s2*(f)
// Returns the summed squared magnitude of the cross-spectrum.
#pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT)
float XCorrEnergy(fftwf_complex const *sig1, fftwf_complex const *sig2, uint32_t const nfreq)
{
  float a, b;
  float sum = 0;
#pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT)
  for (uint32_t i = 0; i < nfreq; ++i){
    // a = Re(s1 * conj(s2)), b = Im(s1 * conj(s2))
    a = (sig1[i][0] * sig2[i][0]) + (sig1[i][1] * sig2[i][1]);
    b = (sig1[i][0] * sig2[i][1]) - (sig1[i][1] * sig2[i][0]);
    sum += (a * a) + (b * b);
    // a = sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1];
    // b = sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0];
    // sum += (sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]) * (sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]) + (sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]) * (sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]);
  }
  return sum;
}

// def get_pt(index, shape, spacing, origin):
//     nx, ny, nz = shape
//     # nx, ny, nz = spacing
//     iz = index % nz
//     iy = ((index - iz) / nz) % ny
//     ix = index / (nz * ny)
//     loc = np.array([ix, iy, iz], dtype=np.float32) * spacing + origin
//     return loc

// Convert a flat grid index into an (x, y, z) world coordinate.
// gdef layout: [0..2] = shape (nx, ny, nz), [3..5] = origin, [6] = spacing;
// z is the fastest-varying axis (index = ix*ny*nz + iy*nz + iz).
std::vector<float> get_point(size_t index, int* gdef){
  int* shape = &gdef[0];
  int* origin = &gdef[3];
  int spacing = gdef[6];
  int nx = shape[0];
  int ny = shape[1];
  int nz = shape[2];
  int iz = index % nz;
  int iy = ((index - iz) / nz) % ny;
  int ix = index / (nz * ny);
  std::vector<float> v(3);
  v[0] = ix * spacing + origin[0];
  v[1] = iy * spacing + origin[1];
  v[2] = iz * spacing + origin[2];
  return v;
}

} // NOTE(review): closes a scope opened before this chunk (likely a namespace)

#endif
pair_mat.h
#ifndef VIENNA_RNA_PACKAGE_PAIR_MAT_H
#define VIENNA_RNA_PACKAGE_PAIR_MAT_H

#include <ctype.h>
#include <ViennaRNA/utils/basic.h>
#include <ViennaRNA/fold_vars.h>

#define NBASES 8
/*@notnull@*/

#ifndef INLINE
# ifdef __GNUC__
#  define INLINE inline
# else
#  define INLINE
# endif
#endif

/* Alphabet for the default encoding: index into this string gives the
 * numerical base code (0 = '_' / unknown). */
static const char Law_and_Order[] = "_ACGUTXKI";

/* BP_pair[i][j] is the pair type of encoded bases i, j (0 = no pair;
 * 1/2 = CG/GC, 3/4 = GU/UG, 5/6 = AU/UA, 7 = nonstandard). */
static int BP_pair[NBASES][NBASES] =
/* _  A  C  G  U  X  K  I */
{ { 0, 0, 0, 0, 0, 0, 0, 0 },
  { 0, 0, 0, 0, 5, 0, 0, 5 },
  { 0, 0, 0, 1, 0, 0, 0, 0 },
  { 0, 0, 2, 0, 3, 0, 0, 0 },
  { 0, 6, 0, 4, 0, 0, 0, 6 },
  { 0, 0, 0, 0, 0, 0, 2, 0 },
  { 0, 0, 0, 0, 0, 1, 0, 0 },
  { 0, 6, 0, 0, 5, 0, 0, 0 } };

#define MAXALPHA 20       /* maximal length of alphabet */

/* alias[] maps encoded bases onto the canonical A/C/G/U set; pair[][] is the
 * active pair-type table filled in by make_pair_matrix() below. */
static short alias[MAXALPHA + 1];
static int pair[MAXALPHA + 1][MAXALPHA + 1];
/* rtype[pair[i][j]]:=pair[j][i] */
static int rtype[8] = {
  0, 2, 1, 4, 3, 6, 5, 7
};

#ifdef _OPENMP
/* Each OpenMP thread keeps its own copy of these tables. */
#pragma omp threadprivate(Law_and_Order, BP_pair, alias, pair, rtype)
#endif

/* for backward compatibility */
#define ENCODE(c) encode_char(c)

static INLINE int
encode_char(char c)
{
  /* return numerical representation of base used e.g. in pair[][] */
  int code;

  c = toupper(c);

  if (energy_set > 0) {
    /* artificial alphabets: 'A' -> 1, 'B' -> 2, ... */
    code = (int)(c - 'A') + 1;
  } else {
    const char *pos;
    pos = strchr(Law_and_Order, c);
    if (pos == NULL)
      code = 0;
    else
      code = (int)(pos - Law_and_Order);

    if (code > 5)
      code = 0;

    if (code > 4)
      code--;           /* make T and U equivalent */
  }

  return code;
}

/*@+boolint +charint@*/
/*@null@*/
extern char *nonstandards;

/* Populate alias[], pair[][] and rtype[] according to the global energy_set
 * (0 = standard RNA alphabet; 1-3 = artificial alphabets), the noGU flag and
 * the optional nonstandards pair list.  Must be called before using pair[][]. */
static INLINE void
make_pair_matrix(void)
{
  int i, j;

  if (energy_set == 0) {
    for (i = 0; i < 5; i++)
      alias[i] = (short)i;
    alias[5] = 3;     /* X <-> G */
    alias[6] = 2;     /* K <-> C */
    alias[7] = 0;     /* I <-> default base '@' */
    for (i = 0; i < NBASES; i++)
      for (j = 0; j < NBASES; j++)
        pair[i][j] = BP_pair[i][j];
    if (noGU)
      pair[3][4] = pair[4][3] = 0;

    if (nonstandards != NULL) {
      /* allow nonstandard bp's */
      for (i = 0; i < (int)strlen(nonstandards); i += 2)
        pair[encode_char(nonstandards[i])]
        [encode_char(nonstandards[i + 1])] = 7;
    }

    for (i = 0; i < NBASES; i++)
      for (j = 0; j < NBASES; j++)
        rtype[pair[i][j]] = pair[j][i];
  } else {
    for (i = 0; i <= MAXALPHA; i++)
      for (j = 0; j <= MAXALPHA; j++)
        pair[i][j] = 0;
    if (energy_set == 1) {
      /* alphabet of AB pairs: odd letters behave like G, even like C */
      for (i = 1; i < MAXALPHA; ) {
        alias[i++] = 3;     /* A <-> G */
        alias[i++] = 2;     /* B <-> C */
      }
      for (i = 1; i < MAXALPHA; i++) {
        pair[i][i + 1] = 2;     /* AB <-> GC */
        i++;
        pair[i][i - 1] = 1;     /* BA <-> CG */
      }
    } else if (energy_set == 2) {
      /* alphabet of AB pairs treated as AU/UA */
      for (i = 1; i < MAXALPHA; ) {
        alias[i++] = 1;     /* A <-> A*/
        alias[i++] = 4;     /* B <-> U */
      }
      for (i = 1; i < MAXALPHA; i++) {
        pair[i][i + 1] = 5;     /* AB <-> AU */
        i++;
        pair[i][i - 1] = 6;     /* BA <-> UA */
      }
    } else if (energy_set == 3) {
      /* four-letter cycle ABCD mapped onto GC and AU pairs */
      for (i = 1; i < MAXALPHA - 2; ) {
        alias[i++] = 3;     /* A <-> G */
        alias[i++] = 2;     /* B <-> C */
        alias[i++] = 1;     /* C <-> A */
        alias[i++] = 4;     /* D <-> U */
      }
      for (i = 1; i < MAXALPHA - 2; i++) {
        pair[i][i + 1] = 2;     /* AB <-> GC */
        i++;
        pair[i][i - 1] = 1;     /* BA <-> CG */
        i++;
        pair[i][i + 1] = 5;     /* CD <-> AU */
        i++;
        pair[i][i - 1] = 6;     /* DC <-> UA */
      }
    } else {
      vrna_message_error("What energy_set are YOU using??");
    }

    /* rebuild the reverse-pair lookup for the artificial alphabet */
    for (i = 0; i <= MAXALPHA; i++)
      for (j = 0; j <= MAXALPHA; j++)
        rtype[pair[i][j]] = pair[j][i];
  }
}

/* Encode a sequence string into a 1-based array of base codes.
 * how == 0: standard encoding (S); S[0] holds the length, S[l+1] wraps to S[1].
 * how == 1: alias-mapped encoding for mismatches (S1); S[0] mirrors S[l].
 * Returned array is allocated with vrna_alloc(); caller frees. */
static INLINE short *
encode_sequence(const char  *sequence,
                short       how)
{
  unsigned int i, l = (unsigned int)strlen(sequence);
  short *S = (short *)vrna_alloc(sizeof(short) * (l + 2));

  switch (how) {
    /* standard encoding as always used for S */
    case 0:
      for (i = 1; i <= l; i++)  /* make numerical encoding of sequence */
        S[i] = (short)encode_char(sequence[i - 1]);
      S[l + 1] = S[1];
      S[0] = (short)l;
      break;
    /* encoding for mismatches of nostandard bases (normally used for S1) */
    case 1:
      for (i = 1; i <= l; i++)
        S[i] = alias[(short)encode_char(sequence[i - 1])];
      S[l + 1] = S[1];
      S[0] = S[l];
      break;
  }

  return S;
}

#endif /* VIENNA_RNA_PACKAGE_PAIR_MAT_H */
task-5.c
/* { dg-do run } */ #include <omp.h> #include <stdlib.h> int err; int main () { int e; #pragma omp parallel shared(err) { if (omp_in_final ()) #pragma omp atomic write err = 1; #pragma omp task if (0) shared(err) { if (omp_in_final ()) #pragma omp atomic write err = 1; #pragma omp task if (0) shared(err) if (omp_in_final ()) #pragma omp atomic write err = 1; } #pragma omp task final (1) shared(err) { if (!omp_in_final ()) #pragma omp atomic write err = 1; #pragma omp taskyield #pragma omp taskwait #pragma omp task shared(err) if (!omp_in_final ()) #pragma omp atomic write err = 1; } } #pragma omp atomic read e = err; if (e) abort (); return 0; }
knnImpl.h
// This code is part of the project "ParGeo: A Library for Parallel Computational Geometry" // Copyright (c) 2021-2022 Yiqiu Wang, Shangdi Yu, Laxman Dhulipala, Yan Gu, Julian Shun // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#pragma once

#include <limits>
#include <algorithm>
#include "parlay/parallel.h"
#include "parlay/sequence.h"
#include "kdTree.h"
#include "pargeo/point.h"
// #include <omp.h>

namespace pargeo::kdTreeNUMA
{
  namespace knnBuf
  {
    typedef int intT;
    typedef double floatT;

    // A candidate neighbour: (distance, pointer to point).  A default
    // constructed elem acts as "infinitely far" sentinel.
    template <typename T>
    struct elem
    {
      floatT cost; // Non-negative
      T entry;
      elem(floatT t_cost, T t_entry) : cost(t_cost), entry(t_entry) {}
      elem() : cost(std::numeric_limits<floatT>::max()) {}
      bool operator<(const elem &b) const
      {
        if (cost < b.cost)
          return true;
        return false;
      }
    };

    // Fixed-capacity scratch buffer that keeps the k best (smallest-cost)
    // candidates seen so far.  Backed by a caller-provided slice; when the
    // slice fills up, keepK() compacts it back down to the best k.
    template <typename T>
    struct buffer
    {
      typedef parlay::slice<elem<T> *, elem<T> *> sliceT;
      intT k;          // number of neighbours requested
      intT ptr;        // number of valid entries currently in buf
      sliceT buf;      // backing storage (capacity >= k, typically 2k)
      double max_cost = 0;  // largest cost among the entries kept so far

      buffer(intT t_k, sliceT t_buf) : k(t_k), ptr(0), buf(t_buf) {}

      inline void reset() { ptr = 0; }

      bool hasK() { return ptr >= k; }

      // Compact down to the k smallest candidates (unordered) and return the
      // k-th best; also refreshes max_cost over those k entries.
      elem<T> keepK()
      {
        if (ptr < k)
          throw std::runtime_error("Error, kbuffer not enough k.");
        ptr = k;
        std::nth_element(buf.begin(), buf.begin() + k - 1, buf.end());
        max_cost = 0;
        for(auto b = buf.begin(); b < buf.begin() + k ; ++b){
          max_cost = std::max(max_cost, b->cost);
        }
        return buf[k - 1];
      }

      // Sort the first k entries by cost (requires at least k entries).
      void sort()
      {
        // todo check
        if (ptr < k)
          throw std::runtime_error("Error, sorting kbuffer without enough k.");
        parlay::sort_inplace(buf.cut(0, k));
      }

      // Append a candidate; compacts via keepK() when the slice is full.
      void insert(elem<T> t_elem)
      {
        buf[ptr++] = t_elem;
        max_cost = std::max(max_cost, t_elem.cost);
        if (ptr >= buf.size())
          keepK();
      }

      elem<T> operator[](intT i)
      {
        if (i < ptr)
          return buf[i];
        else
          return elem<T>();
      }

      inline size_t size() {return ptr;}

      // Current search radius: the worst cost retained, or +inf until k
      // candidates have been collected.
      inline double back()
      {
        if(ptr<k){return std::numeric_limits<double>::max();}
        return max_cost;
      }
    };
  }

  // Range phase of the "2" variant: prune subtrees whose box cannot
  // intersect the query ball, and shrink the radius as better candidates
  // are found (radius := out.back() after each insertion/descend).
  template <int dim, typename nodeT, typename objT>
  void knnRangeHelper2(nodeT *tree, objT &q,
                       double radius, knnBuf::buffer<objT *> &out)
  {
    int relation = tree->boxBallCompare(q, radius, tree->getMin(), tree->getMax());

    if (relation == tree->boxExclude)
    {
      return;
    }
    // else if (relation == tree->boxInclude)
    // {
    //   for (size_t i = 0; i < tree->size(); ++i)
    //   {
    //     objT *p = tree->getItem(i);
    //     out.insert(knnBuf::elem(q.dist(*p), p));
    //   }
    // }
    else
    { // intersect
      if (tree->isLeaf())
      {
        for (size_t i = 0; i < tree->size(); ++i)
        {
          objT *p = tree->getItem(i);
          double dist = q.dist(*p);
          if (dist <= radius)
          {
            out.insert(knnBuf::elem(dist, p));
            radius = out.back();
          }
        }
      }
      else
      {
        knnRangeHelper2<dim, nodeT, objT>(tree->L(), q, radius, out);
        radius = out.back();
        knnRangeHelper2<dim, nodeT, objT>(tree->R(), q, radius, out);
      }
    }
  }

  // Descend to the leaf containing q collecting candidates; if fewer than k
  // were found, brute-force the sibling, otherwise run a shrinking-radius
  // range search over the sibling subtree.
  template <int dim, typename nodeT, typename objT>
  void knnHelper2(nodeT *tree, objT &q, knnBuf::buffer<objT *> &out)
  {
    // find the leaf first
    int relation = tree->boxCompare(tree->getMin(), tree->getMax(),
                                    point<dim>(q.coords()),
                                    point<dim>(q.coords()));

    if (relation == tree->boxExclude)
    {
      return;
    }
    else
    {
      if (tree->isLeaf())
      {
        // basecase
        for (size_t i = 0; i < tree->size(); ++i)
        {
          objT *p = tree->getItem(i);
          out.insert(knnBuf::elem(q.dist(*p), p));
        }
      }
      else
      {
        knnHelper2<dim, nodeT, objT>(tree->L(), q, out);
        knnHelper2<dim, nodeT, objT>(tree->R(), q, out);
      }
    }

    if (!out.hasK())
    {
      if (tree->siblin() == NULL)
      {
        throw std::runtime_error("Error, knnHelper reached root node without enough neighbors.");
      }
      for (size_t i = 0; i < tree->siblin()->size(); ++i)
      {
        objT *p = tree->siblin()->getItem(i);
        out.insert(knnBuf::elem(q.dist(*p), p));
      }
    }
    else
    { // Buffer filled to a least k
      if (tree->siblin() != NULL)
      {
        knnRangeHelper2<dim, nodeT, objT>(tree->siblin(), q, out.back(), out);
      }
    }
  }

  // Batch kNN writing results into a caller-provided idx slice; ordermap /
  // treemap remap query and tree indices when the caller shuffled them.
  template <int dim, class objT>
  void batchKnn2(parlay::slice<objT *, objT *> queries,
                 size_t k,
                 tree<dim, objT> *tree,
                 parlay::slice<size_t *, size_t *> idx,
                 parlay::slice<int *, int *> ordermap, //only used when shuffle search or shuffle tree is true
                 parlay::slice<int *, int *> treemap, //only used when shuffle search or shuffle tree is true
                 bool shuffle_search = false,
                 bool shuffle_tree = false,
                 bool sorted=false)
  {
    using nodeT = node<dim, objT>;
    bool freeTree = false;
    if (!tree)
    {
      freeTree = true;
      tree = build<dim, objT>(queries, true);
    }
    // 2k scratch entries per query so insert() rarely has to compact
    auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
    // auto idx = parlay::sequence<size_t>(k * queries.size());
    parlay::parallel_for(0, queries.size(), [&](size_t i)
    {
      knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
      knnHelper2<dim, nodeT, objT>(tree, queries[i], buf);
      buf.keepK();
      if (sorted){buf.sort();}
      size_t idx_i = shuffle_search ? ordermap[i] : i;
      // size_t idx_i = queries[i].attribute;
      for (size_t j = 0; j < k; ++j){
        size_t result_id = buf[j].entry - tree->items_begin;
        result_id = shuffle_tree ? treemap[result_id] : result_id;
        idx[idx_i * k + j] = result_id;
      }
    });
    // NOTE(review): free() on a C++ object -- assumes build() allocates with
    // a malloc-compatible allocator and the node type is trivially
    // destructible; confirm against kdTree.h.
    if (freeTree)
      free(tree);
    // return idx;
  }

  // Fixed-radius helper for the classic variant: box/box pruning with full
  // inclusion short-circuit (no radius shrinking).
  template <int dim, typename nodeT, typename objT>
  void knnRangeHelper(nodeT *tree, objT &q, point<dim> qMin, point<dim> qMax,
                      double radius, knnBuf::buffer<objT *> &out)
  {
    int relation = tree->boxCompare(qMin, qMax, tree->getMin(), tree->getMax());

    if (relation == tree->boxExclude)
    {
      return;
    }
    else if (relation == tree->boxInclude)
    {
      for (size_t i = 0; i < tree->size(); ++i)
      {
        objT *p = tree->getItem(i);
        out.insert(knnBuf::elem(q.dist(*p), p));
      }
    }
    else
    { // intersect
      if (tree->isLeaf())
      {
        for (size_t i = 0; i < tree->size(); ++i)
        {
          objT *p = tree->getItem(i);
          double dist = q.dist(*p);
          if (dist <= radius)
          {
            out.insert(knnBuf::elem(dist, p));
          }
        }
      }
      else
      {
        knnRangeHelper<dim, nodeT, objT>(tree->L(), q, qMin, qMax, radius, out);
        knnRangeHelper<dim, nodeT, objT>(tree->R(), q, qMin, qMax, radius, out);
      }
    }
  }

  // Search the axis-aligned box circumscribing the ball of the given radius
  // around q.
  template <int dim, typename nodeT, typename objT>
  void knnRange(nodeT *tree, objT &q, double radius, knnBuf::buffer<objT *> &out)
  {
    point<dim> qMin, qMax;
    for (size_t i = 0; i < dim; i++)
    {
      auto tmp = q[i] - radius;
      qMin[i] = tmp;
      qMax[i] = tmp + radius * 2;
    }
    knnRangeHelper<dim, nodeT, objT>(tree, q, qMin, qMax, radius, out);
  }

  // Classic kNN descent: leaf collection, sibling fallback when fewer than k
  // candidates, then a fixed-radius range search with the k-th distance.
  template <int dim, typename nodeT, typename objT>
  void knnHelper(nodeT *tree, objT &q, knnBuf::buffer<objT *> &out)
  {
    // find the leaf first
    int relation = tree->boxCompare(tree->getMin(), tree->getMax(),
                                    point<dim>(q.coords()),
                                    point<dim>(q.coords()));

    if (relation == tree->boxExclude)
    {
      return;
    }
    else
    {
      if (tree->isLeaf())
      {
        // basecase
        for (size_t i = 0; i < tree->size(); ++i)
        {
          objT *p = tree->getItem(i);
          out.insert(knnBuf::elem(q.dist(*p), p));
        }
      }
      else
      {
        knnHelper<dim, nodeT, objT>(tree->L(), q, out);
        knnHelper<dim, nodeT, objT>(tree->R(), q, out);
      }
    }

    if (!out.hasK())
    {
      if (tree->siblin() == NULL)
      {
        throw std::runtime_error("Error, knnHelper reached root node without enough neighbors.");
      }
      for (size_t i = 0; i < tree->siblin()->size(); ++i)
      {
        objT *p = tree->siblin()->getItem(i);
        out.insert(knnBuf::elem(q.dist(*p), p));
      }
    }
    else
    { // Buffer filled to a least k
      if (tree->siblin() != NULL)
      {
        knnBuf::elem tmp = out.keepK();
        knnRange<dim, nodeT, objT>(tree->siblin(), q, tmp.cost, out);
      }
    }
  }

  // the tree->items have to be the same as queries
  // Returns a flat sequence of k neighbour indices per query (indices into
  // the queries slice itself).
  template <int dim, class objT>
  parlay::sequence<size_t> batchKnn(parlay::slice<objT *, objT *> queries,
                                    size_t k,
                                    node<dim, objT> *tree,
                                    bool sorted)
  {
    using nodeT = node<dim, objT>;
    bool freeTree = false;
    if (!tree)
    {
      freeTree = true;
      tree = build<dim, objT>(queries, true);
    }
    auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
    auto idx = parlay::sequence<size_t>(k * queries.size());
    parlay::parallel_for(0, queries.size(), [&](size_t i)
    {
      knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
      knnHelper<dim, nodeT, objT>(tree, queries[i], buf);
      buf.keepK();
      if (sorted){ buf.sort();}
      for (size_t j = 0; j < k; ++j)
      {
        idx[i * k + j] = buf[j].entry - queries.begin();
      }
    });
    if (freeTree)
      free(tree);
    return idx;
  }

  // Exhaustive traversal with no pruning (baseline / debugging); k is only
  // passed through, pruning decisions are never made.
  // NOTE(review): uses tree->left / tree->right members while the helpers
  // above use L()/R() accessors -- confirm both exist on the node type.
  template <int dim, typename nodeT, typename objT>
  void traverseTree(nodeT *tree, objT &query, knnBuf::buffer<objT *> &out, int k)
  {
    if (tree->isLeaf())
    {
      // basecase
      for (size_t i = 0; i < tree->size(); ++i)
      {
        objT *p = tree->getItem(i);
        out.insert(knnBuf::elem(query.dist(*p), p));
      }
      return;
    }
    traverseTree<dim, nodeT, objT>(tree->left, query, out, k);
    traverseTree<dim, nodeT, objT>(tree->right, query, out, k);
  }

  // Batch version of traverseTree: brute-force kNN through the tree leaves.
  template <int dim, class objT>
  parlay::sequence<size_t> batchTraverse(parlay::slice<objT *, objT *> queries,
                                         size_t k,
                                         node<dim, objT> *tree = nullptr,
                                         bool sorted = false)
  {
    using nodeT = node<dim, objT>;
    bool freeTree = false;
    if (!tree)
    {
      freeTree = true;
      tree = build<dim, objT>(queries, true);
    }
    auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
    auto idx = parlay::sequence<size_t>(k * queries.size());
    parlay::parallel_for(0, queries.size(), [&](size_t i)
    {
      knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
      traverseTree<dim, nodeT, objT>(tree, queries[i], buf, k);
      buf.keepK();
      if (sorted)
        buf.sort();
      for (size_t j = 0; j < k; ++j)
      {
        idx[i * k + j] = buf[j].entry - queries.begin();
      }
    });
    if (freeTree)
      free(tree);
    return idx;
  }

  /* (Removed dead code: large commented-out experimental variants --
   * knnHelperSimple/batchKnnSimple, a split-plane kd-tree descent that
   * recursed into the near child first and visited the far child only when
   * the splitting plane was closer than the current k-th distance, and
   * batchKnnOmp, a batchKnn driven by an OpenMP parallel-for instead of
   * parlay::parallel_for.) */

  // O(n^2) reference implementation: compare every query against every point.
  // NOTE(review): as written this does not compile -- `elem` is unqualified
  // (should be knnBuf::elem) and q.dist(p) is called with a pointer where the
  // other call sites use q.dist(*p); presumably never instantiated.
  template <int dim, typename objT>
  parlay::sequence<size_t> bruteforceKnn(parlay::sequence<objT> &queries, size_t k)
  {
    auto out = parlay::sequence<knnBuf::elem<objT *>>(2 * k * queries.size());
    auto idx = parlay::sequence<size_t>(k * queries.size());
    parlay::parallel_for(0, queries.size(), [&](size_t i)
    {
      objT q = queries[i];
      knnBuf::buffer buf = knnBuf::buffer<objT *>(k, out.cut(i * 2 * k, (i + 1) * 2 * k));
      for (size_t j = 0; j < queries.size(); ++j)
      {
        objT *p = &queries[j];
        buf.insert(elem(q.dist(p), p));
      }
      buf.keepK();
      for (size_t j = 0; j < k; ++j)
      {
        idx[i * k + j] = buf[j].entry - queries.data();
      }
    });
    return idx;
  }

} // End namespace pargeo
fmt-m.c
// driver routines for fmt funcs created by gen-fmt #include <stdlib.h> #include "fmt.h" #include "fmt-m.h" #ifdef USE_CUDA #include "cuda/cuda-fmt-drv.h" #endif int fmt_m_init(void) { int ret=-1; ret = fmt_method1_init(); if (ret<0) return -1; ret = fmt_method2_init(); if (ret<0) return -2; ret = fmt_method3_init(); if (ret<0) return -3; #ifdef USE_CUDA #pragma omp master ret = cuda_fmt_m_init(); if (ret<0) return -4; // atexit( cuda_fmt_m_finalize() ); #endif return 0; }; void (*fmt_m[]) ( const double t, const double coef, double f[] ) = { fmt0_method1, fmt1_method1, fmt2_method1, fmt3_method1, fmt4_method1, fmt5_method1, fmt6_method3, fmt7_method3, fmt8_method3, }; void fmt_mm(const int m, const double t, const double coef, double f[] ) { fmt_m[m](t, coef, f); #if 0 if (m==0) fmt(f, 0, t, coef); else fmt_m[m](t, coef, f); #endif };
imutils.c
/*
 * Author: Curtis McCully
 * October 2014
 * Licensed under a 3-clause BSD style license - see LICENSE.rst
 *
 * Originally written in C++ in 2011
 * See also https://github.com/cmccully/lacosmicx
 *
 * This file contains image utility functions for SCRAPPY. These are the most
 * computationally expensive pieces of the calculation so they have been ported
 * to C.
 *
 * Many thanks to Nicolas Devillard who wrote the optimized methods for finding
 * the median and placed them in the public domain. I have noted in the
 * comments places that use Nicolas Devillard's code.
 *
 * Parallelization has been achieved using OpenMP. Using a compiler that does
 * not support OpenMP, e.g. clang currently, the code should still compile and
 * run serially without issue. I have tried to be explicit as possible about
 * specifying which variables are private and which should be shared, although
 * we never actually have any shared variables. We use firstprivate instead.
 * This does mean that it is important that we never have two threads write to
 * the same memory position at the same time.
 *
 * All calculations are done with 32 bit floats to keep the memory footprint
 * small.
 */
#include<Python.h>
#include "imutils.h"

/* Subsample an array 2x2 given an input array data with size nx x ny. Each
 * pixel is replicated into 4 pixels; no averaging is performed. The results
 * are saved in the output array. The output array should already be allocated
 * as we work on it in place. Data should be striped in the x direction such
 * that the memory location of pixel i,j is data[nx *j + i]. */
void
PySubsample(float* data, float* output, int nx, int ny)
{
    /* Fixed typo in the docstring: "PySubample" -> "PySubsample". */
    PyDoc_STRVAR(PySubsample__doc__,
        "PySubsample(data, output, nx, ny) -> void\n\n"
            "Subsample an array 2x2 given an input array data with size "
            "nx x ny.The results are saved in the output array. The output "
            "array should already be allocated as we work on it in place. Each"
            " pixel is replicated into 4 pixels; no averaging is performed. "
            "Data should be striped in the x direction such that the memory "
            "location of pixel i,j is data[nx *j + i].");

    /* Precalculate the new length; minor optimization */
    int padnx = 2 * nx;

    /* Loop indices */
    int i, j, nxj, padnxj;

    /* Loop over all pixels */
#pragma omp parallel for firstprivate(data, output, nx, ny, padnx) \
    private(i, j, nxj, padnxj)
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        padnxj = 2 * padnx * j;
        for (i = 0; i < nx; i++) {
            /* Copy the pixel value into a 2x2 grid on the output image */
            output[2 * i + padnxj] = data[i + nxj];
            output[2 * i + padnxj + padnx] = data[i + nxj];
            output[2 * i + 1 + padnxj + padnx] = data[i + nxj];
            output[2 * i + 1 + padnxj] = data[i + nxj];
        }
    }

    return;
}

/* Rebin an array 2x2, with size (2 * nx) x (2 * ny). Rebin the array by block
 * averaging 4 pixels back into 1. This is effectively the opposite of
 * subsample (although subsample does not do an average). The results are saved
 * in the output array. The output array should already be allocated as we work
 * on it in place. Data should be striped in the x direction such that the
 * memory location of pixel i,j is data[nx *j + i]. */
void
PyRebin(float* data, float* output, int nx, int ny)
{
    PyDoc_STRVAR(PyRebin__doc__,
        "PyRebin(data, output, nx, ny) -> void\n \n"
            "Rebin an array 2x2, with size (2 * nx) x (2 * ny). Rebin the "
            "array by block averaging 4 pixels back into 1. This is "
            "effectively the opposite of subsample (although subsample does "
            "not do an average). The results are saved in the output array. "
            "The output array should already be allocated as we work on it in "
            "place. Data should be striped in the x direction such that the "
            "memory location of pixel i,j is data[nx *j + i].");

    /* Size of original array */
    int padnx = nx * 2;

    /* Loop variables */
    int i, j, nxj, padnxj;

    /* Pixel value p. Each thread needs its own copy of this variable so we
     * wait to initialize it until the pragma below */
    float p;

#pragma omp parallel for firstprivate(output, data, nx, ny, padnx) \
    private(i, j, nxj, padnxj, p)
    /*Loop over all of the pixels */
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        padnxj = 2 * padnx * j;
        for (i = 0; i < nx; i++) {
            /* Average the 4 source pixels that map onto this output pixel */
            p = data[2 * i + padnxj];
            p += data[2 * i + padnxj + padnx];
            p += data[2 * i + 1 + padnxj + padnx];
            p += data[2 * i + 1 + padnxj];
            p /= 4.0;
            output[i + nxj] = p;
        }
    }
    return;
}

/* Convolve an image of size nx x ny with a kernel of size kernx x kerny. The
 * results are saved in the output array. The output array should already be
 * allocated as we work on it in place. Data and kernel should both be striped
 * in the x direction such that the memory location of pixel i,j is
 * data[nx *j + i]. */
void
PyConvolve(float* data, float* kernel, float* output, int nx, int ny,
           int kernx, int kerny)
{
    /* Fixed typo in the docstring: "with a a kernel" -> "with a kernel". */
    PyDoc_STRVAR(PyConvolve__doc__,
        "PyConvolve(data, kernel, output, nx, ny, kernx, kerny) -> void\n\n"
            "Convolve an image of size nx x ny with a kernel of size "
            "kernx x kerny. The results are saved in the output array. The "
            "output array should already be allocated as we work on it in "
            "place. Data and kernel should both be striped along the x "
            "direction such that the memory location of pixel i,j is "
            "data[nx *j + i].");

    /* Get the width of the borders that we will pad with zeros */
    int bnx = (kernx - 1) / 2;
    int bny = (kerny - 1) / 2;

    /* Calculate the dimensions of the array including padded border */
    int padnx = nx + kernx - 1;
    int padny = ny + kerny - 1;

    /* Get the total number of pixels in the padded array */
    int padnxny = padnx * padny;

    /*Allocate the padded array */
    float* padarr = (float *) malloc(padnxny * sizeof(float));
    if (padarr == NULL) {
        /* Allocation failed: leave output untouched rather than crash.
         * (Previously the NULL pointer was dereferenced unchecked.) */
        return;
    }

    /* Loop variables. These should all be thread private. */
    int i, j;
    int nxj;
    int padnxj;
    /* Inner loop variables. Again thread private. */
    int k, l;
    int kernxl, padnxl;

    /* Define a sum variable to use in the convolution calculation. Each
     * thread needs its own copy of this so it should be thread private. */
    float sum;

    /* Precompute maximum good index in each dimension */
    int xmaxgood = nx + bnx;
    int ymaxgood = ny + bny;

    /* Set the borders of padarr = 0.0
     * Fill the rest of the padded array with the input data. */
#pragma omp parallel for \
    firstprivate(padarr, data, nx, padnx, padny, bnx, bny, xmaxgood, ymaxgood)\
    private(nxj, padnxj, i, j)
    for (j = 0; j < padny; j++) {
        padnxj = padnx * j;
        nxj = nx * (j - bny);
        for (i = 0; i < padnx; i++) {
            if (i < bnx || j < bny || j >= ymaxgood || i >= xmaxgood) {
                padarr[padnxj + i] = 0.0;
            }
            else {
                padarr[padnxj + i] = data[nxj + i - bnx];
            }
        }
    }

    /* Calculate the convolution */
    /* Loop over all pixels */
#pragma omp parallel for \
    firstprivate(padarr, output, nx, ny, padnx, bnx, bny, kernx) \
    private(nxj, padnxj, kernxl, padnxl, i, j, k, l, sum)
    for (j = 0; j < ny; j++) {
        nxj = nx * j;
        /* Note the + bny in padnxj */
        padnxj = padnx * (j + bny);
        for (i = 0; i < nx; i++) {
            sum = 0.0;
            /* Note that the sums in the definition of the convolution go from
             * -border width to + border width */
            for (l = -bny; l <= bny; l++) {
                padnxl = padnx * (l + j + bny);
                kernxl = kernx * (-l + bny);
                for (k = -bnx; k <= bnx; k++) {
                    sum += kernel[bnx - k + kernxl]
                        * padarr[padnxl + k + i + bnx];
                }
            }
            output[nxj + i] = sum;
        }
    }

    free(padarr);

    return;
}

/* Convolve an image of size nx x ny the following kernel:
 *  0 -1  0
 * -1  4 -1
 *  0 -1  0
 * The results are saved in the output array. The output array should
 * already be allocated as we work on it in place.
 * This is a discrete version of the Laplacian operator.
 * Data should be striped in the x direction such that the memory location of
 * pixel i,j is data[nx *j + i]. */
void
PyLaplaceConvolve(float* data, float* output, int nx, int ny)
{
    PyDoc_STRVAR(PyLaplaceConvolve__doc__,
        "PyLaplaceConvolve(data, output, nx, ny) -> void\n\n"
            "Convolve an image of size nx x ny the following kernel:\n"
            " 0 -1  0\n"
            "-1  4 -1\n"
            " 0 -1  0\n"
            "This is a discrete version of the Laplacian operator. The results"
            " are saved in the output array. The output array should already "
            "be allocated as we work on it in place.Data should be striped in "
            "the x direction such that the memory location of pixel i,j is "
            "data[nx *j + i].");

    /* Precompute the total number of pixels in the image */
    int nxny = nx * ny;

    /* Loop variables */
    int i, j, nxj;

    /* Pixel value p. Each thread will need its own copy of this so we need to
     * make it private*/
    float p;

    /* Because we know the form of the kernel, we can short circuit the
     * convolution and calculate the results with inner nest for loops. */

    /*Loop over all of the pixels except the edges which we will do explicitly
     * below */
#pragma omp parallel for firstprivate(nx, ny, output, data) \
    private(i, j, nxj, p)
    for (j = 1; j < ny - 1; j++) {
        nxj = nx * j;
        for (i = 1; i < nx - 1; i++) {
            p = 4.0 * data[nxj + i];
            p -= data[i + 1 + nxj];
            p -= data[i - 1 + nxj];
            p -= data[i + nxj + nx];
            p -= data[i + nxj - nx];
            output[nxj + i] = p;
        }
    }

    /* Leave the corners until the very end */

    /* BUGFIX: these two edge loops previously used "#pragma omp parallel"
     * (without "for"), so every thread redundantly executed the whole loop
     * and raced on the shared variable p. They are now proper worksharing
     * loops with p private. */

    /* Top and Bottom Rows */
#pragma omp parallel for firstprivate(output, data, nx, nxny) private(i, p)
    for (i = 1; i < nx - 1; i++) {
        /* Bottom row: the out-of-image neighbor is simply dropped */
        output[i] = 4.0 * data[i] - data[i + 1] - data[i - 1] - data[i + nx];

        /* Top row */
        p = 4.0 * data[i + nxny - nx];
        p -= data[i + 1 + nxny - nx];
        p -= data[i + nxny - nx - 1];
        p -= data[i - nx + nxny - nx];
        output[i + nxny - nx] = p;
    }

    /* First and Last Column */
#pragma omp parallel for firstprivate(output, data, nx, ny) \
    private(j, nxj, p)
    for (j = 1; j < ny - 1; j++) {
        nxj = nx * j;
        /* First column */
        p = 4.0 * data[nxj];
        p -= data[nxj + 1];
        p -= data[nxj + nx];
        p -= data[nxj - nx];
        output[nxj] = p;

        /* Last column */
        p = 4.0 * data[nxj + nx - 1];
        p -= data[nxj + nx - 2];
        p -= data[nxj + nx + nx - 1];
        p -= data[nxj - 1];
        output[nxj + nx - 1] = p;
    }

    /* Bottom Left Corner */
    output[0] = 4.0 * data[0] - data[1] - data[nx];

    /* Bottom Right Corner */
    output[nx - 1] = 4.0 * data[nx - 1] - data[nx - 2] - data[nx + nx - 1];

    /* Top Left Corner */
    p = 4.0 * data[nxny - nx];
    p -= data[nxny - nx + 1];
    p -= data[nxny - nx - nx];
    output[nxny - nx] = p;

    /* Top Right Corner */
    p = 4.0 * data[nxny - 1];
    p -= data[nxny - 2];
    p -= data[nxny - 1 - nx];
    output[nxny - 1] = p;

    return;
}

/* Perform a boolean dilation on an array of size nx x ny. The results are
 * saved in the output array. The output array should already be allocated as
 * we work on it in place.
 * Dilation is the boolean equivalent of a convolution but using logical ors
 * instead of a sum.
 * We apply the following kernel:
 * 1 1 1
 * 1 1 1
 * 1 1 1
 * The binary dilation is not computed for a 1 pixel border around the image.
 * These pixels are copied from the input data. Data should be striped along
 * the x direction such that the memory location of pixel i,j is
 * data[i + nx * j]. */
void
PyDilate3(bool* data, bool* output, int nx, int ny)
{
    PyDoc_STRVAR(PyDilate3__doc__,
        "PyDilate3(data, output, nx, ny) -> void\n\n"
            "Perform a boolean dilation on an array of size nx x ny. The "
            "results are saved in the output array which should already be "
            "allocated as we work on it in place. "
            "Dilation is the boolean equivalent of a convolution but using "
            "logical or instead of a sum. We apply a 3x3 kernel of all ones. "
            "Dilation is not computed for a 1 pixel border which is copied "
            "from the input data. Data should be striped along the x-axis "
            "such that the location of pixel i,j is data[i + nx * j].");

    /* Precompute the total number of pixels; minor optimization */
    int nxny = nx * ny;

    /* Loop variables */
    int i, j, nxj;

    /* Pixel value p. Each thread needs its own unique copy of this so we
     don't initialize this until the pragma below.
*/ bool p; #pragma omp parallel for firstprivate(output, data, nxny, nx, ny) \ private(i, j, nxj, p) /* Loop through all of the pixels excluding the border */ for (j = 1; j < ny - 1; j++) { nxj = nx * j; for (i = 1; i < nx - 1; i++) { /*Start in the middle and work out */ p = data[i + nxj]; /* Right 1 */ p = p || data[i + 1 + nxj]; /* Left 1 */ p = p || data[i - 1 + nxj]; /* Up 1 */ p = p || data[i + nx + nxj]; /* Down 1 */ p = p || data[i - nx + nxj]; /* Up 1 Right 1 */ p = p || data[i + 1 + nx + nxj]; /* Up 1 Left 1 */ p = p || data[i - 1 + nx + nxj]; /* Down 1 Right 1 */ p = p || data[i + 1 - nx + nxj]; /* Down 1 Left 1 */ p = p || data[i - 1 - nx + nxj]; output[i + nxj] = p; } } #pragma omp parallel firstprivate(output, data, nx, nxny) private(i) /* For the borders, copy the data from the input array */ for (i = 0; i < nx; i++) { output[i] = data[i]; output[nxny - nx + i] = data[nxny - nx + i]; } #pragma omp parallel firstprivate(output, data, nx, ny) private(j, nxj) for (j = 0; j < ny; j++) { nxj = nx * j; output[nxj] = data[nxj]; output[nxj - 1 + nx] = data[nxj - 1 + nx]; } return; } /* Do niter iterations of boolean dilation on an array of size nx x ny. The * results are saved in the output array. The output array should already be * allocated as we work on it in place. * Dilation is the boolean equivalent of a convolution but using logical ors * instead of a sum. * We apply the following kernel: * 0 1 1 1 0 * 1 1 1 1 1 * 1 1 1 1 1 * 1 1 1 1 1 * 0 1 1 1 0 * The edges are padded with zeros so that the dilation operator is defined for * all pixels. Data should be striped along the x direction such that the * memory location of pixel i,j is data[i + nx * j]. */ void PyDilate5(bool* data, bool* output, int niter, int nx, int ny) { PyDoc_STRVAR(PyDilate5__doc__, "PyDilate5(data, output, nx, ny) -> void\n\n" "Do niter iterations of boolean dilation on an array of size " "nx x ny. The results are saved in the output array. 
The output " "array should already be allocated as we work on it in place. " "Dilation is the boolean equivalent of a convolution but using " "logical ors instead of a sum. We apply the following kernel:\n" "0 1 1 1 0\n" "1 1 1 1 1\n" "1 1 1 1 1\n" "1 1 1 1 1\n" "0 1 1 1 0\n" "Data should be striped along the x direction such that the " "location of pixel i,j is data[i + nx * j]."); /* Pad the array with a border of zeros */ int padnx = nx + 4; int padny = ny + 4; /* Precompute the total number of pixels; minor optimization */ int padnxny = padnx * padny; int nxny = nx * ny; /* The padded array to work on */ bool* padarr = (bool *) malloc(padnxny * sizeof(bool)); /*Loop indices */ int i, j, nxj, padnxj; int iter; /* Pixel value p. This needs to be unique for each thread so we initialize * it below inside the pragma. */ bool p; #pragma omp parallel firstprivate(padarr, padnx, padnxny) private(i) /* Initialize the borders of the padded array to zero */ for (i = 0; i < padnx; i++) { padarr[i] = false; padarr[i + padnx] = false; padarr[padnxny - padnx + i] = false; padarr[padnxny - padnx - padnx + i] = false; } #pragma omp parallel firstprivate(padarr, padnx, padny) private(j, padnxj) for (j = 0; j < padny; j++) { padnxj = padnx * j; padarr[padnxj] = false; padarr[padnxj + 1] = false; padarr[padnxj + padnx - 1] = false; padarr[padnxj + padnx - 2] = false; } #pragma omp parallel firstprivate(output, data, nxny) private(i) /* Initialize the output array to the input data */ for (i = 0; i < nxny; i++) { output[i] = data[i]; } /* Outer iteration loop */ for (iter = 0; iter < niter; iter++) { #pragma omp parallel for firstprivate(padarr, output, nx, ny, padnx, iter) \ private(nxj, padnxj, i, j) /* Initialize the padded array to the output from the latest * iteration*/ for (j = 0; j < ny; j++) { padnxj = padnx * j; nxj = nx * j; for (i = 0; i < nx; i++) { padarr[i + 2 + padnx + padnx + padnxj] = output[i + nxj]; } } /* Loop over all pixels */ #pragma omp parallel for 
firstprivate(padarr, output, nx, ny, padnx, iter) \ private(nxj, padnxj, i, j, p) for (j = 0; j < ny; j++) { nxj = nx * j; /* Note the + 2 padding in padnxj */ padnxj = padnx * (j + 2); for (i = 0; i < nx; i++) { /* Start with the middle pixel and work out */ p = padarr[i + 2 + padnxj]; /* Right 1 */ p = p || padarr[i + 3 + padnxj]; /* Left 1 */ p = p || padarr[i + 1 + padnxj]; /* Up 1 */ p = p || padarr[i + 2 + padnx + padnxj]; /* Down 1 */ p = p || padarr[i + 2 - padnx + padnxj]; /* Up 1 Right 1 */ p = p || padarr[i + 3 + padnx + padnxj]; /* Up 1 Left 1 */ p = p || padarr[i + 1 + padnx + padnxj]; /* Down 1 Right 1 */ p = p || padarr[i + 3 - padnx + padnxj]; /* Down 1 Left 1 */ p = p || padarr[i + 1 - padnx + padnxj]; /* Right 2 */ p = p || padarr[i + 4 + padnxj]; /* Left 2 */ p = p || padarr[i + padnxj]; /* Up 2 */ p = p || padarr[i + 2 + padnx + padnx + padnxj]; /* Down 2 */ p = p || padarr[i + 2 - padnx - padnx + padnxj]; /* Right 2 Up 1 */ p = p || padarr[i + 4 + padnx + padnxj]; /* Right 2 Down 1 */ p = p || padarr[i + 4 - padnx + padnxj]; /* Left 2 Up 1 */ p = p || padarr[i + padnx + padnxj]; /* Left 2 Down 1 */ p = p || padarr[i - padnx + padnxj]; /* Up 2 Right 1 */ p = p || padarr[i + 3 + padnx + padnx + padnxj]; /* Up 2 Left 1 */ p = p || padarr[i + 1 + padnx + padnx + padnxj]; /* Down 2 Right 1 */ p = p || padarr[i + 3 - padnx - padnx + padnxj]; /* Down 2 Left 1 */ p = p || padarr[i + 1 - padnx - padnx + padnxj]; output[i + nxj] = p; } } } free(padarr); return; }
GB_unop__signum_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__signum_fp64_fp64) // op(A') function: GB (_unop_tran__signum_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = GB_signum (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_signum (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = GB_signum (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SIGNUM || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__signum_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t 
p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = GB_signum (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = GB_signum (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__signum_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
laplace2d.c
#include <stdio.h> #include <math.h> #include <string.h> #include "timer.h" #define NN 4096 #define NM 4096 double A[NN][NM]; double Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(double)); memset(Anew, 0, n * m * sizeof(double)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; #pragma acc data copy(A), create(Anew) while ( error > tol && iter < iter_max ) { error = 0.0; //#pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } //#pragma omp parallel for shared(m, n, Anew, A) #pragma acc kernels for( int j = 1; j < n-1; j++) { for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); }
array_args.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
/* NOTE(review): the include-guard name spells "AGRS" (presumably a typo of
 * "ARGS"); it is used consistently, so it is kept as-is here. */
#ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_
#define LIGHTGBM_UTILS_ARRAY_AGRS_H_

#include <LightGBM/utils/openmp_wrapper.h>

#include <algorithm>
#include <utility>
#include <vector>

namespace LightGBM {

/*!
 * \brief Contains some operation for an array, e.g. ArgMax, TopK.
 */
template<typename VAL_T>
class ArrayArgs {
 public:
  /*!
   * \brief Multi-threaded arg-max: each OpenMP thread scans one contiguous
   *        chunk of the array, then the per-chunk winners are reduced
   *        serially. Returns the index of the first maximal element found.
   */
  inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) {
    int num_threads = 1;
    // Query the team size inside a parallel region; "master" ensures only
    // one thread writes num_threads.
    #pragma omp parallel
    #pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    // Chunk size, rounded up so all elements are covered.
    // NOTE(review): step * i below is int arithmetic — assumes the array has
    // fewer than INT_MAX elements; TODO confirm that holds for all callers.
    int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads);
    std::vector<size_t> arg_maxs(num_threads, 0);
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads; ++i) {
      size_t start = step * i;
      if (start >= array.size()) {
        continue;
      }
      size_t end = std::min(array.size(), start + step);
      size_t arg_max = start;
      for (size_t j = start + 1; j < end; ++j) {
        if (array[j] > array[arg_max]) {
          arg_max = j;
        }
      }
      arg_maxs[i] = arg_max;
    }
    // Serial reduction over the per-chunk winners.
    size_t ret = arg_maxs[0];
    for (int i = 1; i < num_threads; ++i) {
      if (array[arg_maxs[i]] > array[ret]) {
        ret = arg_maxs[i];
      }
    }
    return ret;
  }

  /*!
   * \brief Index of the maximum element; returns 0 for an empty array.
   *        Falls back to the multi-threaded version for arrays > 1024.
   */
  inline static size_t ArgMax(const std::vector<VAL_T>& array) {
    if (array.empty()) {
      return 0;
    }
    if (array.size() > 1024) {
      return ArgMaxMT(array);
    } else {
      size_t arg_max = 0;
      for (size_t i = 1; i < array.size(); ++i) {
        if (array[i] > array[arg_max]) {
          arg_max = i;
        }
      }
      return arg_max;
    }
  }

  /*! \brief Index of the minimum element; returns 0 for an empty array. */
  inline static size_t ArgMin(const std::vector<VAL_T>& array) {
    if (array.empty()) {
      return 0;
    }
    size_t arg_min = 0;
    for (size_t i = 1; i < array.size(); ++i) {
      if (array[i] < array[arg_min]) {
        arg_min = i;
      }
    }
    return arg_min;
  }

  /*! \brief Raw-pointer overload of ArgMax; returns 0 when n is 0. */
  inline static size_t ArgMax(const VAL_T* array, size_t n) {
    if (n <= 0) {
      return 0;
    }
    size_t arg_max = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] > array[arg_max]) {
        arg_max = i;
      }
    }
    return arg_max;
  }

  /*! \brief Raw-pointer overload of ArgMin; returns 0 when n is 0. */
  inline static size_t ArgMin(const VAL_T* array, size_t n) {
    if (n <= 0) {
      return 0;
    }
    size_t arg_min = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] < array[arg_min]) {
        arg_min = i;
      }
    }
    return arg_min;
  }

  /*!
   * \brief Three-way (Bentley–McIlroy style) partition of (*arr)[start, end)
   *        around the pivot at end-1, ordering larger elements first.
   *        On return, elements in (*l, *r) (exclusive bounds) are equal to
   *        the pivot, indices <= *l are greater, indices >= *r are smaller.
   *        The statement order here is load-bearing; do not reorder.
   */
  inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) {
    int i = start - 1;
    int j = end - 1;
    int p = i;   // right edge of pivot-equal elements parked on the left
    int q = j;   // left edge of pivot-equal elements parked on the right
    if (start >= end) {
      return;
    }
    std::vector<VAL_T>& ref = *arr;
    VAL_T v = ref[end - 1];  // pivot value
    for (;;) {
      // Scan inward from both sides (descending order: '>' on the left).
      while (ref[++i] > v) {}
      while (v > ref[--j]) {
        if (j == start) {
          break;
        }
      }
      if (i >= j) {
        break;
      }
      std::swap(ref[i], ref[j]);
      // Park pivot-equal elements at the outer edges.
      if (ref[i] == v) {
        p++;
        std::swap(ref[p], ref[i]);
      }
      if (v == ref[j]) {
        q--;
        std::swap(ref[j], ref[q]);
      }
    }
    // Move the pivot into place, then swap the parked equal runs into the
    // middle from both edges.
    std::swap(ref[i], ref[end - 1]);
    j = i - 1;
    i = i + 1;
    for (int k = start; k <= p; k++, j--) {
      std::swap(ref[k], ref[j]);
    }
    for (int k = end - 2; k >= q; k--, i++) {
      std::swap(ref[i], ref[k]);
    }
    *l = j;
    *r = i;
  }

  // Note: k refer to index here. e.g. k=0 means get the max number.
  /*!
   * \brief Quickselect: after this call, (*arr)[k] holds the element that
   *        would be at position k in descending order. Recurses into the
   *        partition half that contains k.
   */
  inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) {
    if (start >= end - 1) {
      return start;
    }
    int l = start;
    int r = end - 1;
    Partition(arr, start, end, &l, &r);
    // if find or all elements are the same.
    if ((k > l && k < r) || (l == start - 1 && r == end - 1)) {
      return k;
    } else if (k <= l) {
      return ArgMaxAtK(arr, start, l + 1, k);
    } else {
      return ArgMaxAtK(arr, r, end, k);
    }
  }

  // Note: k is 1-based here. e.g. k=3 means get the top-3 numbers.
  /*!
   * \brief Copy array into *out and trim it to its k largest elements
   *        (order within the top-k is unspecified). No-op trim when
   *        k >= array.size(); clears *out when k <= 0.
   */
  inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) {
    out->clear();
    if (k <= 0) {
      return;
    }
    for (auto val : array) {
      out->push_back(val);
    }
    if (static_cast<size_t>(k) >= array.size()) {
      return;
    }
    ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1);
    out->erase(out->begin() + k, out->end());
  }

  /*! \brief Resize *array to n elements, all set to t. */
  inline static void Assign(std::vector<VAL_T>* array, VAL_T t, size_t n) {
    array->resize(n);
    for (size_t i = 0; i < array->size(); ++i) {
      (*array)[i] = t;
    }
  }

  /*! \brief True when every element equals VAL_T(0). */
  inline static bool CheckAllZero(const std::vector<VAL_T>& array) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != VAL_T(0)) {
        return false;
      }
    }
    return true;
  }

  /*! \brief True when every element equals t. */
  inline static bool CheckAll(const std::vector<VAL_T>& array, VAL_T t) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != t) {
        return false;
      }
    }
    return true;
  }
};

}  // namespace LightGBM

#endif   // LIGHTGBM_UTILS_ARRAY_AGRS_H_
conv3x3s1_winograd64_transform_kernel_neon5_GgG.h
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void conv3x3s1_winograd64_transform_kernel_neon5_GgG(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 0.0f, 0.0f}, {-2.0f/9, -2.0f/9, -2.0f/9}, {-2.0f/9, 2.0f/9, -2.0f/9}, {1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j=0; j<8; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<8; i++) { kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } } }
mpi_dynamic_mandelbrot.c
// // mandelbrot.c // // // The Mandelbrot calculation is to iterate the equation // z = z*z + c, where z and c are complex numbers, z is initially // zero, and c is the coordinate of the point being tested. If // the magnitude of z remains less than 2 for ever, then the point // c is in the Mandelbrot set. In this code We write out the number of iterations // before the magnitude of z exceeds 2, or UCHAR_MAX, whichever is // smaller.// // // #include <stdio.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <mpi.h> #include <stdio.h> #include <unistd.h> #include <omp.h> /*void color(int red, int green, int blue) { fputc((char)red, stdout); fputc((char)green, stdout); fputc((char)blue, stdout); }*/ int main (int argc, char* argv[]) { int rank, size;// id process and size of the communicator //int namelen; MPI_Init (&argc, &argv); /* starts MPI */ MPI_Comm_rank (MPI_COMM_WORLD, &rank); // get current process id MPI_Comm_size (MPI_COMM_WORLD, &size); // get number of processes int w = atoi(argv[1]), h = atoi(argv[2]), x, y; // width, height from arguments double pr, pi; double newRe, newIm, oldRe, oldIm; //real and imaginary parts of new and old z double zoom = 1, moveX = -0.5, moveY = 0; //you can change these to zoom and change position int maxIterations = atoi(argv[3]);//after how much iterations the function should stop double start, elapsed; //int rowsForWorker=h/size;// We got the number of rows that a Worker have to process int begin,end; //MPI_Get_processor_name(hostname, &namelen); // get CPU name start = MPI_Wtime(); //printf( "Hello world from process %d of %d (node %s)\n", rank, size, hostname ); //begin= rowsForWorker*rank;// first row to process the process //end= rowsForWorker*rank+ rowsForWorker;//last row to process for that process //printf("P6\n# CREATOR: Eric R. 
Weeks / mandel program\n"); //printf("%d %d\n255\n",w,h); /*if(rank==size-1){ end=end+h%size;// if is the last process we give the rest of the rows to process rowsForWorker=rowsForWorker+h%size; }*/ int operation; // 0 Worker x wants more work, 1 Worker wants to send the results int rowsToRead[2]; // Row to start, Row to end typedef unsigned char pixelType[3]; if(rank==0){ // Master code distribution of the work one row of the matrix for slave MPI_Status status; // Recover the slave which has sent the data int rows=-1;// At the moment there is now row to send pixelType *pixels=malloc(sizeof(pixelType)*h*w); FILE * sortida;// File to generate the intermediary results while(rows<h){ // While there exist data to send //tag 1 means an action tag = 0 means Master expects to receive expected data MPI_Recv(&operation, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD,&status); if(operation==0){// The worker x wants more work rows++; rowsToRead[0]=rows; rowsToRead[1]=rows+1; MPI_Send(rowsToRead, 2, MPI_INT, status.MPI_SOURCE, 0, MPI_COMM_WORLD);// The master sends to the slave the file it has to process } else if(operation == 1){ MPI_Recv(pixels[rows*w], sizeof(pixelType)*w, MPI_CHAR,MPI_ANY_SOURCE, 0, MPI_COMM_WORLD,&status); } } //There is no more data time to kill slaves int slaves=1; operation=-1; for(slaves=1;slaves<size;slaves++){ rowsToRead[0]=-1; rowsToRead[1]=-1; MPI_Send(rowsToRead, 2, MPI_INT, slaves, 0, MPI_COMM_WORLD); } int i=0; int j=0; sortida= fopen("sortida.ppm","wb"); fprintf(sortida, "P6\n# CREATOR: Eric R. 
Weeks / mandel program\n"); fprintf(sortida, "%d %d\n255\n", w, h); x=0; y=0; for(y = 0; y < h; y++){ for(x = 0; x < w; x++){ fwrite(pixels[y*w+x],1,sizeof(pixelType),sortida); } } elapsed = MPI_Wtime() - start; fprintf(stderr, "Elapsed time: %.2lf seconds.\n", elapsed); fclose(sortida); free(pixels); } else{ pixelType * rowData=malloc(sizeof(pixelType)*w); while(1){ operation=0; MPI_Send(&operation, 1, MPI_INT, 0, 1, MPI_COMM_WORLD); MPI_Recv(rowsToRead, 2, MPI_INT,0, 0, MPI_COMM_WORLD,MPI_STATUS_IGNORE); //operation=-1 means there is no more data to process if(rowsToRead[0]==-1 && rowsToRead[1]==-1){ break; } else{ // There is more data begin=rowsToRead[0]; end=rowsToRead[1]; #pragma omp parallel for shared(rowData,moveX,moveY,zoom) private(x,y,pr,pi,newRe,newIm,oldRe,oldIm) schedule(static) for(y =begin ; y <end ; y++){ for(x = 0; x < w; x++) { //calculate the initial real and imaginary part of z, based on the pixel location and zoom and position values pr = 1.5 * (x - w / 2) / (0.5 * zoom * w) + moveX; pi = (y - h / 2) / (0.5 * zoom * h) + moveY; newRe = newIm = oldRe = oldIm = 0; //these should start at 0,0 //"i" will represent the number of iterations int i; //start the iteration process for(i = 0; i < maxIterations; i++) { //remember value of previous iteration oldRe = newRe; oldIm = newIm; //the actual iteration, the real and imaginary part are calculated newRe = oldRe * oldRe - oldIm * oldIm + pr; newIm = 2 * oldRe * oldIm + pi; //if the point is outside the circle with radius 2: stop if((newRe * newRe + newIm * newIm) > 4) break; } // color(i % 256, 255, 255 * (i < maxIterations)); if(i == maxIterations){ rowData[x][0]=(char)0; rowData[x][1]=(char)0; rowData[x][2]=(char)0; } else { double z = sqrt(newRe * newRe + newIm * newIm); int brightness = 256 * log2(1.75 + i - log2(log2(z))) / log2((double)maxIterations); //color(brightness, brightness, 255); rowData[x][0]=(char)brightness; rowData[x][1]=(char)brightness; rowData[x][2]=(char)255; } } } operation=1; 
MPI_Send(&operation, 1, MPI_INT, 0, 1, MPI_COMM_WORLD); MPI_Send(rowData, sizeof(pixelType)*w, MPI_CHAR, 0, 0, MPI_COMM_WORLD); } } free(rowData); //pixelType *pixelsWorker=malloc(sizeof(pixelType)*h*w); //from the first row to the last row } //MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % snibgo (Alan Gibson) % % January 2022 % % % % % % % % Copyright @ 2022 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" #define MaxTokenLen 100 #define RpnInit 100 #define TableExtend 0.1 #define InitNumOprStack 50 #define MinValStackSize 100 #define 
InitNumUserSymbols 50

/* Floating-point type used for all fx arithmetic and the runtime value stack. */
typedef long double fxFltType;

/* Operator tokens.  Order must match the Operators[] table below, which is
   indexed by these values. */
typedef enum {
  oAddEq, oSubtractEq, oMultiplyEq, oDivideEq, oPlusPlus, oSubSub,
  oAdd, oSubtract, oMultiply, oDivide, oModulus,
  oUnaryPlus, oUnaryMinus, oLshift, oRshift,
  oEq, oNotEq, oLtEq, oGtEq, oLt, oGt,
  oLogAnd, oLogOr, oLogNot, oBitAnd, oBitOr, oBitNot, oPow,
  oQuery, oColon,
  oOpenParen, oCloseParen, oOpenBracket, oCloseBracket, oOpenBrace, oCloseBrace,
  oAssign,
  oNull
} OperatorE;

/* One entry per operator: its enum value, source-text spelling, precedence and
   argument count. */
typedef struct {
  OperatorE op;
  const char * str;
  int precedence; /* Higher number is higher precedence */
  int nArgs;
} OperatorT;

static const OperatorT Operators[] = {
  {oAddEq,       "+=",  12, 1},
  {oSubtractEq,  "-=",  12, 1},
  {oMultiplyEq,  "*=",  13, 1},
  {oDivideEq,    "/=",  13, 1},
  {oPlusPlus,    "++",  12, 0},
  {oSubSub,      "--",  12, 0},
  {oAdd,         "+",   12, 2},
  {oSubtract,    "-",   12, 2},
  {oMultiply,    "*",   13, 2},
  {oDivide,      "/",   13, 2},
  {oModulus,     "%",   13, 2},
  {oUnaryPlus,   "+",   14, 1},
  {oUnaryMinus,  "-",   14, 1},
  {oLshift,      "<<",  11, 2},
  {oRshift,      ">>",  11, 2},
  {oEq,          "==",  9,  2},
  {oNotEq,       "!=",  9,  2},
  {oLtEq,        "<=",  10, 2},
  {oGtEq,        ">=",  10, 2},
  {oLt,          "<",   10, 2},
  {oGt,          ">",   10, 2},
  {oLogAnd,      "&&",  6,  2},
  {oLogOr,       "||",  5,  2},
  {oLogNot,      "!",   16, 1},
  {oBitAnd,      "&",   8,  2},
  {oBitOr,       "|",   7,  2},
  {oBitNot,      "~",   16, 1},
  {oPow,         "^",   15, 2},
  {oQuery,       "?",   4,  1},
  {oColon,       ":",   4,  1},
  {oOpenParen,   "(",   0,  0},
  {oCloseParen,  ")",   0,  0},
  {oOpenBracket, "[",   0,  0},
  {oCloseBracket,"]",   0,  0},
  {oOpenBrace,   "{",   0,  0},
  {oCloseBrace,  "}",   0,  0},
  {oAssign,      "=",   3,  1},
  {oNull,        "onull", 17, 0}
};

/* Named constants available in fx expressions (eg "pi", "epsilon"). */
typedef enum {
  cEpsilon, cE, cOpaque, cPhi, cPi, cQuantumRange, cQuantumScale,
  cTransparent, cMaxRgb,
  cNull
} ConstantE;

typedef struct {
  ConstantE cons;
  fxFltType val;
  const char * str;
} ConstantT;

static const ConstantT Constants[] = {
  {cEpsilon,      MagickEpsilon,          "epsilon"},
  {cE,            2.7182818284590452354,  "e"},
  {cOpaque,       1.0,                    "opaque"},
  {cPhi,          MagickPHI,              "phi"},
  {cPi,           MagickPI,               "pi"},
  {cQuantumRange, QuantumRange,           "quantumrange"},
  {cQuantumScale, QuantumScale,           "quantumscale"},
  {cTransparent,  0.0,                    "transparent"},
  {cMaxRgb,       QuantumRange,           "MaxRGB"},
  {cNull,         0.0,                    "cnull"}
};

/* Function tokens share the same numeric space as operators; they start just
   past oNull so a single int can identify either. */
#define FirstFunc ((FunctionE) (oNull+1))

typedef enum {
  fAbs = oNull+1,
#if defined(MAGICKCORE_HAVE_ACOSH)
  fAcosh,
#endif
  fAcos,
#if defined(MAGICKCORE_HAVE_J1)
  fAiry,
#endif
  fAlt,
#if defined(MAGICKCORE_HAVE_ASINH)
  fAsinh,
#endif
  fAsin,
#if defined(MAGICKCORE_HAVE_ATANH)
  fAtanh,
#endif
  fAtan2, fAtan, fCeil, fChannel, fClamp, fCosh, fCos, fDebug, fDrc,
#if defined(MAGICKCORE_HAVE_ERF)
  fErf,
#endif
  fExp, fFloor, fGauss, fGcd, fHypot, fInt, fIsnan,
#if defined(MAGICKCORE_HAVE_J0)
  fJ0,
#endif
#if defined(MAGICKCORE_HAVE_J1)
  fJ1,
#endif
#if defined(MAGICKCORE_HAVE_J1)
  fJinc,
#endif
  fLn, fLogtwo, fLog, fMax, fMin, fMod, fNot, fPow, fRand, fRound,
  fSign, fSinc, fSinh, fSin, fSqrt, fSquish, fTanh, fTan, fTrunc,
  fDo, fFor, fIf, fWhile, fU, fU0, fUP, fS, fV, fP, fSP, fVP,
  fNull
} FunctionE;

typedef struct {
  FunctionE func;
  const char * str;
  int nArgs;
} FunctionT;

/* Must use the same MAGICKCORE_HAVE_* conditionals as FunctionE so the table
   stays aligned with the enum. */
static const FunctionT Functions[] = {
  {fAbs,     "abs"    , 1},
#if defined(MAGICKCORE_HAVE_ACOSH)
  {fAcosh,   "acosh"  , 1},
#endif
  {fAcos,    "acos"   , 1},
#if defined(MAGICKCORE_HAVE_J1)
  {fAiry,    "airy"   , 1},
#endif
  {fAlt,     "alt"    , 1},
#if defined(MAGICKCORE_HAVE_ASINH)
  {fAsinh,   "asinh"  , 1},
#endif
  {fAsin,    "asin"   , 1},
#if defined(MAGICKCORE_HAVE_ATANH)
  {fAtanh,   "atanh"  , 1},
#endif
  {fAtan2,   "atan2"  , 2},
  {fAtan,    "atan"   , 1},
  {fCeil,    "ceil"   , 1},
  {fChannel, "channel" , 5},
  {fClamp,   "clamp"  , 1},
  {fCosh,    "cosh"   , 1},
  {fCos,     "cos"    , 1},
  {fDebug,   "debug"  , 1},
  {fDrc,     "drc"    , 2},
#if defined(MAGICKCORE_HAVE_ERF)
  {fErf,     "erf"    , 1},
#endif
  {fExp,     "exp"    , 1},
  {fFloor,   "floor"  , 1},
  {fGauss,   "gauss"  , 2},
  {fGcd,     "gcd"    , 2},
  {fHypot,   "hypot"  , 2},
  {fInt,     "int"    , 1},
  {fIsnan,   "isnan"  , 1},
#if defined(MAGICKCORE_HAVE_J0)
  {fJ0,      "j0"     , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
  {fJ1,      "j1"     , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
  {fJinc,    "jinc"   , 1},
#endif
  {fLn,      "ln"     , 1},
  {fLogtwo,  "logtwo", 1},
  {fLog,     "log"    , 1},
  {fMax,     "max"    , 2},
  {fMin,     "min"    , 2},
  {fMod,     "mod"    , 2},
  {fNot,     "not"    , 1},
  {fPow,     "pow"    , 2},
  {fRand,    "rand"   , 0},
  {fRound,   "round"  , 1},
  {fSign,    "sign"   , 1},
  {fSinc,    "sinc"   , 1},
  {fSinh,    "sinh"   , 1},
  {fSin,     "sin"    , 1},
  {fSqrt,    "sqrt"   , 1},
  {fSquish,  "squish", 1},
  {fTanh,    "tanh"   , 1},
  {fTan,     "tan"    , 1},
  {fTrunc,   "trunc"  , 1},
  {fDo,      "do",     2},
  {fFor,     "for",    3},
  {fIf,      "if",     3},
  {fWhile,   "while",  2},
  {fU,       "u",      1},
  {fU0,      "u0",     0},
  {fUP,      "up",     3},
  {fS,       "s",      0},
  {fV,       "v",      0},
  {fP,       "p",      2},
  {fSP,      "sp",     2},
  {fVP,      "vp",     2},
  {fNull,    "fnull"  , 0}
};

/* Image attributes (eg "w", "page.x") continue the token number space. */
#define FirstImgAttr ((ImgAttrE) (fNull+1))

typedef enum {
  aDepth = fNull+1,
  aExtent, aKurtosis, aMaxima, aMean, aMedian, aMinima,
  aPage, aPageX, aPageY, aPageWid, aPageHt,
  aPrintsize, aPrintsizeX, aPrintsizeY,
  aQuality, aRes, aResX, aResY,
  aSkewness, aStdDev,
  aH, aN, aT, aW, aZ,
  aNull
} ImgAttrE;

typedef struct {
  ImgAttrE attr;
  const char * str;
  int NeedStats; /* 1 if evaluating this attribute requires channel statistics */
} ImgAttrT;

static const ImgAttrT ImgAttrs[] = {
  {aDepth,      "depth",              1},
  {aExtent,     "extent",             0},
  {aKurtosis,   "kurtosis",           1},
  {aMaxima,     "maxima",             1},
  {aMean,       "mean",               1},
  {aMedian,     "median",             1},
  {aMinima,     "minima",             1},
  {aPage,       "page",               0},
  {aPageX,      "page.x",             0},
  {aPageY,      "page.y",             0},
  {aPageWid,    "page.width",         0},
  {aPageHt,     "page.height",        0},
  {aPrintsize,  "printsize",          0},
  {aPrintsizeX, "printsize.x",        0},
  {aPrintsizeY, "printsize.y",        0},
  {aQuality,    "quality",            0},
  {aRes,        "resolution",         0},
  {aResX,       "resolution.x",       0},
  {aResY,       "resolution.y",       0},
  {aSkewness,   "skewness",           1},
  {aStdDev,     "standard_deviation", 1},
  {aH,          "h",                  0},
  {aN,          "n",                  0},
  {aT,          "t",                  0},
  {aW,          "w",                  0},
  {aZ,          "z",                  0},
  {aNull,       "anull",              0}
};

/* Symbols (per-pixel values such as "hue", coordinates "i"/"j" etc). */
#define FirstSym ((SymbolE) (aNull+1))

typedef enum {
  sHue = aNull+1,
  sIntensity, sLightness, sLuma, sLuminance, sSaturation,
  sA, sB, sC, sG, sI, sJ, sK, sM, sO, sR, sY,
  sNull
} SymbolE;

typedef struct {
  SymbolE sym;
  const char * str;
} SymbolT;

static const SymbolT Symbols[] = {
  {sHue,        "hue"},
  {sIntensity,  "intensity"},
  {sLightness,  "lightness"},
  {sLuma,       "luma"},
  {sLuminance,  "luminance"},
  {sSaturation, "saturation"},
  {sA,          "a"},
  {sB,          "b"},
  {sC,          "c"},
  {sG,          "g"},
  {sI,          "i"},
  {sJ,          "j"},
  {sK,          "k"},
  {sM,          "m"},
  {sO,          "o"},
  {sR,          "r"},
  {sY,          "y"},
  {sNull,       "snull"}
};
/*
   There is no way to access new value of pixels. This might be a future
   enhancement, eg "q". fP, oU and oV can have channel qualifier such as
   "u.r". For meta channels, we might also allow numbered channels eg "u.2"
   or "u.16". ... or have extra argument to p[].
*/

#define FirstCont (sNull+1)

/* Run-time controls are in the RPN, not explicitly in the input string. */
typedef enum {
  rGoto = FirstCont,
  rIfZeroGoto, rIfNotZeroGoto,
  rCopyFrom, rCopyTo,
  rZerStk,
  rNull
} ControlE;

typedef struct {
  ControlE cont;
  const char * str;
  int nArgs;
} ControlT;

static const ControlT Controls[] = {
  {rGoto,          "goto",          0},
  {rIfZeroGoto,    "ifzerogoto",    1},
  {rIfNotZeroGoto, "ifnotzerogoto", 1},
  {rCopyFrom,      "copyfrom",      0},
  {rCopyTo,        "copyto",        1},
  {rZerStk,        "zerstk",        0},
  {rNull,          "rnull",         0}
};

/* Sentinel address used while the target of a branch is not yet known. */
#define NULL_ADDRESS -2

/* Bookkeeping for a "? :" ternary while it is being translated. */
typedef struct {
  int addrQuery;
  int addrColon;
} TernaryT;

typedef struct {
  const char * str;
  PixelChannel pixChan;
} ChannelT;

/* Pseudo-channel codes (negative so they never clash with real PixelChannel
   values). */
#define NO_CHAN_QUAL      ((PixelChannel) (-1))
#define THIS_CHANNEL      ((PixelChannel) (-2))
#define HUE_CHANNEL       ((PixelChannel) (-3))
#define SAT_CHANNEL       ((PixelChannel) (-4))
#define LIGHT_CHANNEL     ((PixelChannel) (-5))
#define INTENSITY_CHANNEL ((PixelChannel) (-6))

static const ChannelT Channels[] = {
  {"r",          RedPixelChannel},
  {"g",          GreenPixelChannel},
  {"b",          BluePixelChannel},
  {"c",          CyanPixelChannel},
  {"m",          MagentaPixelChannel},
  {"y",          YellowPixelChannel},
  {"k",          BlackPixelChannel},
  {"a",          AlphaPixelChannel},
  {"o",          AlphaPixelChannel},
  {"hue",        HUE_CHANNEL},
  {"saturation", SAT_CHANNEL},
  {"lightness",  LIGHT_CHANNEL},
  {"intensity",  INTENSITY_CHANNEL},
  {"all",        CompositePixelChannel},
  {"this",       THIS_CHANNEL},
  {"",           NO_CHAN_QUAL} /* empty string terminates the table */
};

/* The index into UserSymbols is also the index into run-time UserSymVals.
*/
/* A user symbol is recorded as a pointer into the expression text plus a
   length; the name is NOT NUL-terminated in place. */
typedef struct {
  char * pex;
  size_t len;
} UserSymbolT;

typedef enum {
  etOperator, etConstant, etFunction, etImgAttr, etSymbol, etColourConstant,
  etControl
} ElementTypeE;

/* Printable names for ElementTypeE, used by DumpRPN. */
static const char * sElementTypes[] = {
  "Operator", "Constant", "Function", "ImgAttr", "Symbol", "ColConst",
  "Control"
};

/* One element of the compiled RPN program. */
typedef struct {
  ElementTypeE type;
  fxFltType val, val1, val2;
  int oprNum;
  int nArgs;
  MagickBooleanType IsRelative;
  MagickBooleanType DoPush;
  int EleNdx;
  int nDest; /* Number of Elements that "goto" this element */
  PixelChannel ChannelQual;
  ImgAttrE ImgAttrQual;
  char * pExpStart;
  int lenExp;
} ElementT;

typedef enum {
  rtUnknown, rtEntireImage, rtCornerOnly
} RunTypeE;

typedef struct {
  CacheView *View;
  /* Other per-image metadata could go here. */
} ImgT;

/* Per-thread runtime state (value stack, user symbol values, RNG). */
typedef struct {
  RandomInfo * magick_restrict random_info;
  int numValStack;
  int usedValStack;
  fxFltType * ValStack;
  fxFltType * UserSymVals;
  Quantum * thisPixel;
} fxRtT;

struct _FxInfo {
  Image * image;
  size_t ImgListLen;
  ssize_t ImgNum;
  MagickBooleanType NeedStats;
  MagickBooleanType GotStats;
  MagickBooleanType NeedHsl;
  MagickBooleanType DebugOpt;       /* Whether "-debug" option is in effect */
  MagickBooleanType ContainsDebug;  /* Whether expression contains "debug ()" function */
  char * expression;
  char * pex;                       /* current parse position in expression */
  char ShortExp[MagickPathExtent];  /* for reporting */
  int teDepth;
  char token[MagickPathExtent];
  size_t lenToken;
  int numElements;
  int usedElements;
  ElementT * Elements; /* Elements is read-only at runtime. */
  int numUserSymbols;
  int usedUserSymbols;
  UserSymbolT * UserSymbols;
  int numOprStack;
  int usedOprStack;
  int maxUsedOprStack;
  OperatorE * OperatorStack;
  ChannelStatistics ** statistics;
  int precision;
  RunTypeE runType;
  RandomInfo **magick_restrict random_infos;
  ImgT * Imgs;
  Image ** Images;
  ExceptionInfo * exception;
  fxRtT * fxrts;
};
/* Forward declarations for recursion.
*/
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit);
static MagickBooleanType TranslateExpression (FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll);
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe);

/* Initialise an FxInfo for the given image list: acquires a virtual cache
   view per image and an array of image pointers.  On any failure, views
   acquired so far are destroyed before returning MagickFalse. */
static MagickBooleanType InitFx (FxInfo * pfx, const Image * img,
  MagickBooleanType CalcAllStats, ExceptionInfo *exception)
{
  ssize_t i=0;
  const Image * next;

  pfx->ImgListLen = GetImageListLength (img);
  pfx->ImgNum = GetImageIndexInList (img);
  pfx->image = (Image *)img;

  pfx->NeedStats = MagickFalse;
  pfx->GotStats = MagickFalse;
  pfx->NeedHsl = MagickFalse;
  pfx->DebugOpt = IsStringTrue (GetImageArtifact (img, "fx:debug"));
  pfx->statistics = NULL;
  pfx->Imgs = NULL;
  pfx->Images = NULL;
  pfx->exception = exception;
  pfx->precision = GetMagickPrecision ();
  pfx->random_infos = AcquireRandomInfoTLS ();
  pfx->ContainsDebug = MagickFalse;
  pfx->runType = (CalcAllStats) ? rtEntireImage : rtCornerOnly;

  pfx->Imgs = (ImgT *)AcquireQuantumMemory (pfx->ImgListLen, sizeof (ImgT));
  if (!pfx->Imgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Imgs", "%lu",
      pfx->ImgListLen);
    return MagickFalse;
  }

  next = GetFirstImageInList (img);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    ImgT * pimg = &pfx->Imgs[i];
    pimg->View = AcquireVirtualCacheView (next, pfx->exception);
    if (!pimg->View) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "View", "[%li]",
        i);
      /* dealloc any done so far, and Imgs */
      for ( ; i > 0; i--) {
        pimg = &pfx->Imgs[i-1];
        pimg->View = DestroyCacheView (pimg->View);
      }
      pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs);
      return MagickFalse;
    }
    i++;
  }

  pfx->Images = ImageListToArray (img, pfx->exception);

  return MagickTrue;
}

/* Release everything acquired by InitFx (views, image array, TLS random
   infos, and channel statistics if they were computed). */
static MagickBooleanType DeInitFx (FxInfo * pfx)
{
  ssize_t i;

  if (pfx->Images) pfx->Images = (Image**) RelinquishMagickMemory (pfx->Images);

  if (pfx->Imgs) {
    for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) {
      ImgT * pimg = &pfx->Imgs[i-1];
      pimg->View = DestroyCacheView (pimg->View);
    }
    pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs);
  }
  pfx->random_infos = DestroyRandomInfoTLS (pfx->random_infos);

  if (pfx->statistics) {
    for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) {
      pfx->statistics[i-1]=(ChannelStatistics *) RelinquishMagickMemory (pfx->statistics[i-1]);
    }
    pfx->statistics = (ChannelStatistics**) RelinquishMagickMemory(pfx->statistics);
  }

  return MagickTrue;
}

/* Classify a token number into its element type by range; relies on the enum
   ranges being contiguous: operators < oNull < functions <= fNull <
   attributes <= aNull < symbols <= sNull < controls <= rNull. */
static ElementTypeE TypeOfOpr (int op)
{
  if (op <  oNull) return etOperator;
  if (op == oNull) return etConstant;
  if (op <= fNull) return etFunction;
  if (op <= aNull) return etImgAttr;
  if (op <= sNull) return etSymbol;
  if (op <= rNull) return etControl;

  return (ElementTypeE) 0;
}

/* Copy up to len chars of pExp into pfx->ShortExp for error reporting,
   truncating with "..." and stopping at the first newline. */
static char * SetPtrShortExp (FxInfo * pfx, char * pExp, size_t len)
{
  #define MaxLen 20

  size_t slen;
  char * p;

  *pfx->ShortExp = '\0';

  if (pExp && len) {
    slen = CopyMagickString (pfx->ShortExp, pExp, len);
    if (slen > MaxLen) {
      (void) CopyMagickString (pfx->ShortExp+MaxLen, "...", 4);
    }
    p = strchr (pfx->ShortExp, '\n');
    if (p) (void) CopyMagickString (p, "...", 4);
    p = strchr (pfx->ShortExp, '\r');
    if (p) (void) CopyMagickString (p, "...", 4);
  }
  return pfx->ShortExp;
}

/* ShortExp snippet starting at the current parse position. */
static char * SetShortExp (FxInfo * pfx)
{
  return SetPtrShortExp (pfx, pfx->pex, MaxTokenLen-1);
}

static int FindUserSymbol (FxInfo * pfx, char * name)
/* returns index into pfx->UserSymbols, and thus into pfxrt->UserSymVals, or NULL_ADDRESS if not found.
*/
{
  int i;
  size_t lenName;
  lenName = strlen (name);
  /* Linear scan; symbol names are stored as (pointer,length) into the
     expression text, so compare length first then the bytes. */
  for (i=0; i < pfx->usedUserSymbols; i++) {
    UserSymbolT *pus = &pfx->UserSymbols[i];
    if (lenName == pus->len && LocaleNCompare (name, pus->pex, lenName)==0) break;
  }
  if (i == pfx->usedUserSymbols) return NULL_ADDRESS;
  return i;
}

/* Grow the UserSymbols table by the TableExtend factor. */
static MagickBooleanType ExtendUserSymbols (FxInfo * pfx)
{
  pfx->numUserSymbols = (int) ceil (pfx->numUserSymbols * (1 + TableExtend));
  pfx->UserSymbols = (UserSymbolT*) ResizeMagickMemory (pfx->UserSymbols, pfx->numUserSymbols * sizeof(UserSymbolT));
  if (!pfx->UserSymbols) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "UserSymbols", "%i",
      pfx->numUserSymbols);
    return MagickFalse;
  }

  return MagickTrue;
}

/* Append a user symbol (pointer+length into the expression text); returns its
   index, or -1 if the table could not be extended. */
static int AddUserSymbol (FxInfo * pfx, char * pex, size_t len)
{
  UserSymbolT *pus;
  if (++pfx->usedUserSymbols >= pfx->numUserSymbols) {
    if (!ExtendUserSymbols (pfx)) return -1;
  }
  pus = &pfx->UserSymbols[pfx->usedUserSymbols-1];
  pus->pex = pex;
  pus->len = len;

  return pfx->usedUserSymbols-1;
}

/* Debug helper: print every operator/function/attribute/symbol/control
   spelling.  NOTE(review): headings go to stderr but entries go to fh —
   presumably intentional only when fh==stderr; verify. */
static void DumpTables (FILE * fh)
{

  int i;
  for (i=0; i <= rNull; i++) {
    const char * str = "";
    if (                     i < oNull) str = Operators[i].str;
    if (i >= FirstFunc    && i < fNull) str = Functions[i-FirstFunc].str;
    if (i >= FirstImgAttr && i < aNull) str = ImgAttrs[i-FirstImgAttr].str;
    if (i >= FirstSym     && i < sNull) str = Symbols[i-FirstSym].str;
    if (i >= FirstCont    && i < rNull) str = Controls[i-FirstCont].str;
    if (i==0 ) fprintf (stderr, "Operators:\n ");
    else if (i==oNull) fprintf (stderr, "\nFunctions:\n ");
    else if (i==fNull) fprintf (stderr, "\nImage attributes:\n ");
    else if (i==aNull) fprintf (stderr, "\nSymbols:\n ");
    else if (i==sNull) fprintf (stderr, "\nControls:\n ");
    fprintf (fh, " %s", str);
  }
  fprintf (fh, "\n");
}

/* Copy user symbol ndx into buf (NUL-terminated) and return buf. */
static char * NameOfUserSym (FxInfo * pfx, int ndx, char * buf)
{
  UserSymbolT * pus;
  assert (ndx >= 0 && ndx < pfx->usedUserSymbols);
  pus = &pfx->UserSymbols[ndx];
  (void) CopyMagickString (buf, pus->pex, pus->len+1);
  return buf;
}

/* Debug helper: list all user symbols to fh. */
static void DumpUserSymbols (FxInfo * pfx, FILE * fh)
{
  char UserSym[MagickPathExtent];

  int i;
  fprintf (fh, "UserSymbols (%i)\n", pfx->usedUserSymbols);
  for (i=0; i < pfx->usedUserSymbols; i++) {
    fprintf (fh, "  %i: '%s'\n", i, NameOfUserSym (pfx, i, UserSym));
  }
}

/* Allocate the initial translation-time tables: user symbols, RPN elements
   and the operator stack. */
static MagickBooleanType BuildRPN (FxInfo * pfx)
{
  pfx->numUserSymbols = InitNumUserSymbols;
  pfx->usedUserSymbols = 0;
  pfx->UserSymbols = (UserSymbolT*) AcquireMagickMemory (pfx->numUserSymbols * sizeof(UserSymbolT));
  if (!pfx->UserSymbols) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "UserSymbols", "%i",
      pfx->numUserSymbols);
    return MagickFalse;
  }

  pfx->numElements = RpnInit;
  pfx->usedElements = 0;
  pfx->Elements = NULL;

  pfx->Elements = (ElementT*) AcquireMagickMemory (pfx->numElements * sizeof(ElementT));

  if (!pfx->Elements) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Elements", "%i",
      pfx->numElements);
    return MagickFalse;
  }

  pfx->usedOprStack = 0;
  pfx->maxUsedOprStack = 0;
  pfx->numOprStack = InitNumOprStack;
  pfx->OperatorStack = (OperatorE*) AcquireMagickMemory (pfx->numOprStack * sizeof(OperatorE));
  if (!pfx->OperatorStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "OperatorStack", "%i",
      pfx->numOprStack);
    return MagickFalse;
  }

  return MagickTrue;
}

/* Allocate per-thread runtime state: its own RandomInfo (warmed up by a
   random number of draws so threads diverge), the value stack sized from the
   maximum operator-stack depth seen at translation time, and user symbol
   value slots. */
static MagickBooleanType AllocFxRt (FxInfo * pfx, fxRtT * pfxrt)
{
  int nRnd;
  int i;
  pfxrt->random_info = AcquireRandomInfo ();
  pfxrt->thisPixel = NULL;

  nRnd = 20 + 10 * (int) GetPseudoRandomValue (pfxrt->random_info);
  for (i=0; i < nRnd; i++) (void) GetPseudoRandomValue (pfxrt->random_info);;

  pfxrt->usedValStack = 0;
  pfxrt->numValStack = 2 * pfx->maxUsedOprStack;
  if (pfxrt->numValStack < MinValStackSize) pfxrt->numValStack = MinValStackSize;
  pfxrt->ValStack = (fxFltType*) AcquireMagickMemory (pfxrt->numValStack * sizeof(fxFltType));
  if (!pfxrt->ValStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(),
      ResourceLimitFatalError,
      "ValStack", "%i",
      pfxrt->numValStack);
    return MagickFalse;
  }

  pfxrt->UserSymVals = NULL;

  if (pfx->usedUserSymbols) {
    pfxrt->UserSymVals = (fxFltType*) AcquireMagickMemory (pfx->usedUserSymbols * sizeof(fxFltType));
    if (!pfxrt->UserSymVals) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "UserSymVals", "%i",
        pfx->usedUserSymbols);
      return MagickFalse;
    }
    /* All user symbols start at zero. */
    for (i = 0; i < pfx->usedUserSymbols; i++) pfxrt->UserSymVals[i] = (fxFltType) 0;
  }

  return MagickTrue;
}

/* Grow the RPN element table by the TableExtend factor. */
static MagickBooleanType ExtendRPN (FxInfo * pfx)
{
  pfx->numElements = (int) ceil (pfx->numElements * (1 + TableExtend));
  pfx->Elements = (ElementT*) ResizeMagickMemory (pfx->Elements, pfx->numElements * sizeof(ElementT));
  if (!pfx->Elements) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Elements", "%i",
      pfx->numElements);
    return MagickFalse;
  }
  return MagickTrue;
}

/* True for the in-place assignment operators "+=" .. "--". */
static MagickBooleanType inline OprInPlace (int op)
{
  return (op >= oAddEq && op <= oSubSub ? MagickTrue : MagickFalse);
}

/* Source-text spelling for any token number (operator, function, attribute,
   symbol or control); "bad OprStr" for out-of-range values. */
static const char * OprStr (int oprNum)
{
  const char * str;
  if      (oprNum < 0) str = "bad OprStr";
  else if (oprNum <= oNull) str = Operators[oprNum].str;
  else if (oprNum <= fNull) str = Functions[oprNum-FirstFunc].str;
  else if (oprNum <= aNull) str = ImgAttrs[oprNum-FirstImgAttr].str;
  else if (oprNum <= sNull) str = Symbols[oprNum-FirstSym].str;
  else if (oprNum <= rNull) str = Controls[oprNum-FirstCont].str;
  else {
    str = "bad OprStr";
  }
  return str;
}

/* Debug helper: dump the compiled RPN program.  First pass counts, per
   element, how many branch elements target it (nDest), then each element is
   printed with its type, value(s), qualifiers and branch information. */
static MagickBooleanType DumpRPN (FxInfo * pfx, FILE * fh)
{
  int i;

  fprintf (fh, "DumpRPN:");
  fprintf (fh, "  numElements=%i", pfx->numElements);
  fprintf (fh, "  usedElements=%i", pfx->usedElements);
  fprintf (fh, "  maxUsedOprStack=%i", pfx->maxUsedOprStack);
  fprintf (fh, "  ImgListLen=%g", (double) pfx->ImgListLen);
  fprintf (fh, "  NeedStats=%s", pfx->NeedStats ? "yes" : "no");
  fprintf (fh, "  GotStats=%s", pfx->GotStats ? "yes" : "no");
  fprintf (fh, "  NeedHsl=%s\n", pfx->NeedHsl ? "yes" : "no");
  /* NOTE(review): runType goes to stderr, unlike the rest which go to fh. */
  if (pfx->runType==rtEntireImage) fprintf (stderr, "EntireImage");
  else if (pfx->runType==rtCornerOnly) fprintf (stderr, "CornerOnly");
  fprintf (fh, "\n");

  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    pel->nDest = 0;
  }
  /* Count branch destinations so they can be flagged in the listing. */
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    if (pel->oprNum == rGoto || pel->oprNum == rIfZeroGoto || pel->oprNum == rIfNotZeroGoto) {
      if (pel->EleNdx >= 0 && pel->EleNdx < pfx->numElements) {
        ElementT * pelDest = &pfx->Elements[pel->EleNdx];
        pelDest->nDest++;
      }
    }
  }
  for (i=0; i < pfx->usedElements; i++) {
    char UserSym[MagickPathExtent];

    ElementT * pel = &pfx->Elements[i];
    const char * str = OprStr (pel->oprNum);
    const char *sRelAbs = "";

    if (pel->oprNum == fP || pel->oprNum == fUP || pel->oprNum == fVP || pel->oprNum == fSP)
      sRelAbs = pel->IsRelative ? "[]" : "{}";

    if (pel->type == etColourConstant)
      fprintf (fh, "  %i: %s vals=%.*Lg,%.*Lg,%.*Lg '%s%s' nArgs=%i ndx=%i  %s",
               i, sElementTypes[pel->type],
               pfx->precision, pel->val, pfx->precision, pel->val1, pfx->precision, pel->val2,
               str, sRelAbs, pel->nArgs, pel->EleNdx,
               pel->DoPush ? "push" : "NO push");
    else
      fprintf (fh, "  %i: %s val=%.*Lg '%s%s' nArgs=%i ndx=%i  %s",
               i, sElementTypes[pel->type], pfx->precision, pel->val, str, sRelAbs,
               pel->nArgs, pel->EleNdx,
               pel->DoPush ? "push" : "NO push");

    if (pel->ImgAttrQual != aNull)
      fprintf (fh, " ia=%s", OprStr(pel->ImgAttrQual));

    if (pel->ChannelQual != NO_CHAN_QUAL) {
      if (pel->ChannelQual == THIS_CHANNEL) fprintf (stderr, " ch=this");
      else fprintf (stderr, " ch=%i", pel->ChannelQual);
    }

    if (pel->oprNum == rCopyTo) {
      fprintf (fh, "  CopyTo ==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (pel->oprNum == rCopyFrom) {
      fprintf (fh, "  CopyFrom <== %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (OprInPlace (pel->oprNum)) {
      fprintf (fh, "  <==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    }
    if (pel->nDest > 0)  fprintf (fh, "  <==dest(%i)", pel->nDest);
    fprintf (fh, "\n");
  }
  return MagickTrue;
}

/* Free the translation-time tables (operator stack, elements, user symbols). */
static void DestroyRPN (FxInfo * pfx)
{
  pfx->numOprStack = 0;
  pfx->usedOprStack = 0;
  if (pfx->OperatorStack) pfx->OperatorStack = (OperatorE*) RelinquishMagickMemory (pfx->OperatorStack);

  pfx->numElements = 0;
  pfx->usedElements = 0;
  if (pfx->Elements) pfx->Elements = (ElementT*) RelinquishMagickMemory (pfx->Elements);

  pfx->usedUserSymbols = 0;
  if (pfx->UserSymbols) pfx->UserSymbols = (UserSymbolT*) RelinquishMagickMemory (pfx->UserSymbols);
}

/* Free one thread's runtime state (counterpart of AllocFxRt). */
static void DestroyFxRt (fxRtT * pfxrt)
{
  pfxrt->usedValStack = 0;
  if (pfxrt->ValStack) pfxrt->ValStack = (fxFltType*) RelinquishMagickMemory (pfxrt->ValStack);
  if (pfxrt->UserSymVals) pfxrt->UserSymVals = (fxFltType*) RelinquishMagickMemory (pfxrt->UserSymVals);

  pfxrt->random_info = DestroyRandomInfo (pfxrt->random_info);
}

static size_t GetToken (FxInfo * pfx)
/* Returns length of token that starts with an alpha,
     or 0 if it isn't a token that starts with an alpha.
   j0 and j1 have trailing digit.
   Also colours like "gray47" have more trailing digits.
   After intial alpha(s) also allow single "_", eg "standard_deviation".
   Does not advance pfx->pex.
   This splits "mean.r" etc.
*/
{

  char * p = pfx->pex;
  size_t len = 0;
  *pfx->token = '\0';
  pfx->lenToken = 0;

  /* NOTE(review): unlike GetConstantColour below, the isalpha/isdigit calls
     here cast via (int) only, not (unsigned char) — relies on the expression
     text being plain ASCII. */
  if (!isalpha((int)*p)) return 0;

  /* Regard strings that start "icc-" or "device-",
     followed by any number of alphas,
     as a token.
  */

  if (LocaleNCompare (p, "icc-", 4) == 0) {
    len = 4;
    p += 4;
    while (isalpha ((int)*p)) { len++; p++; }
  } else if (LocaleNCompare (p, "device-", 7) == 0) {
    len = 7;
    p += 7;
    while (isalpha ((int)*p)) { len++; p++; }
  } else {
    while (isalpha ((int)*p)) { len++; p++; }
    if (*p == '_')            { len++; p++; }
    while (isalpha ((int)*p)) { len++; p++; }
    while (isdigit ((int)*p)) { len++; p++; }
  }
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetToken: too long", "%g at '%s'",
      (double) len, SetShortExp(pfx));
    len = MaxTokenLen;
  }
  if (len) {
    (void) CopyMagickString (pfx->token, pfx->pex, (len+1<MaxTokenLen)?len+1:MaxTokenLen);
  }

  pfx->lenToken = strlen (pfx->token);
  return len;
}

/* A candidate user symbol is two or more characters, all alphabetic. */
static MagickBooleanType TokenMaybeUserSymbol (FxInfo * pfx)
{
  char * p = pfx->token;
  int i = 0;
  while (*p) {
    if (!isalpha ((int)*p++)) return MagickFalse;
    i++;
  }
  if (i < 2) return MagickFalse;
  return MagickTrue;
}

/* Append one element to the RPN program, defaulting all per-element fields
   and setting nArgs from the appropriate token table. */
static MagickBooleanType AddElement (FxInfo * pfx, fxFltType val, int oprNum)
{
  ElementT * pel;

  assert (oprNum <= rNull);

  if (++pfx->usedElements >= pfx->numElements) {
    if (!ExtendRPN (pfx)) return MagickFalse;
  }

  pel = &pfx->Elements[pfx->usedElements-1];
  pel->type = TypeOfOpr (oprNum);
  pel->val = val;
  pel->val1 = (fxFltType) 0;
  pel->val2 = (fxFltType) 0;
  pel->oprNum = oprNum;
  pel->DoPush = MagickTrue;
  pel->EleNdx = 0;
  pel->ChannelQual = NO_CHAN_QUAL;
  pel->ImgAttrQual = aNull;
  pel->nDest = 0;
  pel->pExpStart = NULL;
  pel->lenExp = 0;

  if (oprNum <= oNull) pel->nArgs = Operators[oprNum].nArgs;
  else if (oprNum <= fNull) pel->nArgs = Functions[oprNum-FirstFunc].nArgs;
  else if (oprNum <= aNull) pel->nArgs = 0;
  else if (oprNum <= sNull) pel->nArgs = 0;
  else                      pel->nArgs = Controls[oprNum-FirstCont].nArgs;

  return MagickTrue;
}

/* Append a control element (goto/conditional/copy) whose EleNdx is a branch
   target or user-symbol index rather than a value. */
static MagickBooleanType AddAddressingElement (FxInfo * pfx, int oprNum, int EleNdx)
{
  ElementT * pel;
  if (!AddElement (pfx, (fxFltType) 0, oprNum)) return MagickFalse;
  pel = &pfx->Elements[pfx->usedElements-1];
  pel->EleNdx = EleNdx;
  if (oprNum == rGoto || oprNum == rIfZeroGoto || oprNum == rIfNotZeroGoto
   || oprNum == rZerStk)
  {
    pel->DoPush = MagickFalse;
  }

  /* Note: for() may or may not need pushing,
     depending on whether the value is needed, eg "for(...)+2" or debug(for(...)).
  */

  return MagickTrue;
}

/* Append a colour constant, which carries three channel values. */
static MagickBooleanType AddColourElement (FxInfo * pfx, fxFltType val0, fxFltType val1, fxFltType val2)
{
  ElementT * pel;
  if (!AddElement (pfx, val0, oNull)) return MagickFalse;
  pel = &pfx->Elements[pfx->usedElements-1];
  pel->val1 = val1;
  pel->val2 = val2;
  pel->type = etColourConstant;
  return MagickTrue;
}

/* Advance the parse pointer past whitespace. */
static void inline SkipSpaces (FxInfo * pfx)
{
  while (isspace ((int)*pfx->pex)) pfx->pex++;
}

/* Next non-space character, without consuming it. */
static char inline PeekChar (FxInfo * pfx)
{
  SkipSpaces (pfx);
  return *pfx->pex;
}

/* True if the next non-space text begins with str (not consumed). */
static MagickBooleanType inline PeekStr (FxInfo * pfx, const char * str)
{
  SkipSpaces (pfx);

  return (LocaleNCompare (pfx->pex, str, strlen(str))==0 ? MagickTrue : MagickFalse);
}

/* Consume the expected character c, or raise an OptionError. */
static MagickBooleanType ExpectChar (FxInfo * pfx, char c)
{
  if (PeekChar (pfx) != c) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Expected char", "'%c' at '%s'",
      c, SetShortExp (pfx));
    return MagickFalse;
  }
  pfx->pex++;
  return MagickTrue;
}

static int MaybeXYWH (FxInfo * pfx, ImgAttrE * pop)
/* If ".x" or ".y" or ".width" or ".height" increments *pop and returns 1 to 4 .
   Otherwise returns 0.
*/
{
  int ret=0;

  /* Only "page", "printsize" and "resolution" accept these sub-attributes;
     the enum lays their .x/.y/.width/.height variants immediately after the
     base value, so adding ret selects the right one. */
  if (*pop != aPage && *pop != aPrintsize && *pop != aRes) return 0;

  if (PeekChar (pfx) != '.') return 0;

  if (!ExpectChar (pfx, '.')) return 0;

  (void) GetToken (pfx);
  if (LocaleCompare ("x", pfx->token)==0) ret=1;
  else if (LocaleCompare ("y", pfx->token)==0) ret=2;
  else if (LocaleCompare ("width", pfx->token)==0) ret=3;
  else if (LocaleCompare ("height", pfx->token)==0) ret=4;

  if (!ret)
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Invalid 'x' or 'y' or 'width' or 'height' token=", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));

  if (*pop == aPage) (*pop) = (ImgAttrE) (*pop + ret);
  else {
    /* printsize/resolution have only .x and .y variants. */
    if (ret > 2) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Invalid 'width' or 'height' token=", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
    } else {
      (*pop) = (ImgAttrE) (*pop + ret);
    }
  }
  pfx->pex+=pfx->lenToken;

  return ret;
}

/* Grow the operator stack by the TableExtend factor. */
static MagickBooleanType ExtendOperatorStack (FxInfo * pfx)
{
  pfx->numOprStack = (int) ceil (pfx->numOprStack * (1 + TableExtend));
  pfx->OperatorStack = (OperatorE*) ResizeMagickMemory (pfx->OperatorStack, pfx->numOprStack * sizeof(OperatorE));
  if (!pfx->OperatorStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "OprStack", "%i",
      pfx->numOprStack);
    return MagickFalse;
  }
  return MagickTrue;
}

/* Push a token onto the operator stack, tracking the high-water mark used to
   size the runtime value stack. */
static MagickBooleanType PushOperatorStack (FxInfo * pfx, int op)
{
  if (++pfx->usedOprStack >= pfx->numOprStack) {
    if (!ExtendOperatorStack (pfx)) return MagickFalse;
  }
  pfx->OperatorStack[pfx->usedOprStack-1] = (OperatorE) op;

  if (pfx->maxUsedOprStack < pfx->usedOprStack)
    pfx->maxUsedOprStack = pfx->usedOprStack;
  return MagickTrue;
}

/* Classify a leading character as a unary-prefix operator or open paren;
   oNull when it is none of these.  Does not consume the character. */
static OperatorE GetLeadingOp (FxInfo * pfx)
{
  OperatorE op = oNull;

  if      (*pfx->pex == '-') op = oUnaryMinus;
  else if (*pfx->pex == '+') op = oUnaryPlus;
  else if (*pfx->pex == '~') op = oBitNot;
  else if (*pfx->pex == '!') op = oLogNot;
  else if (*pfx->pex == '(') op = oOpenParen;

  return op;
}

/* True for the four unary-prefix operators. */
static MagickBooleanType inline OprIsUnaryPrefix (OperatorE op)
{
  return (op == oUnaryMinus || op == oUnaryPlus || op == oBitNot || op == oLogNot ? MagickTrue : MagickFalse);
}

/* True when the operator stack is non-empty and its top is unary-prefix. */
static MagickBooleanType TopOprIsUnaryPrefix (FxInfo * pfx)
{
  if (!pfx->usedOprStack) return MagickFalse;

  return OprIsUnaryPrefix (pfx->OperatorStack[pfx->usedOprStack-1]);
}

/* Pop op (an open-bracket kind) from the stack top; MagickFalse if the stack
   is empty or the top does not match. */
static MagickBooleanType PopOprOpenParen (FxInfo * pfx, OperatorE op)
{

  if (!pfx->usedOprStack) return MagickFalse;

  if (pfx->OperatorStack[pfx->usedOprStack-1] != op) return MagickFalse;

  pfx->usedOprStack--;

  return MagickTrue;
}

static int GetCoordQualifier (FxInfo * pfx, int op)
/* Returns -1 if invalid CoordQualifier, +1 if valid and appropriate.
*/
{
  if (op != fU && op != fV && op != fS) return -1;

  (void) GetToken (pfx);

  if (pfx->lenToken != 1) {
    return -1;
  }
  if (*pfx->token != 'p' && *pfx->token != 'P') return -1;
  if (!GetFunction (pfx, fP)) return -1;

  return 1;
}

/* Parse a channel qualifier (eg ".r") after a pixel reference or image
   attribute; HLS pseudo-channels are rejected for image attributes. */
static PixelChannel GetChannelQualifier (FxInfo * pfx, int op)
{
  if (op == fU || op == fV || op == fP ||
      op == fUP || op == fVP ||
      op == fS || (op >= FirstImgAttr && op <= aNull)
     )
  {
    const ChannelT * pch = &Channels[0];
    (void) GetToken (pfx);

    while (*pch->str) {
      if (LocaleCompare (pch->str, pfx->token)==0) {

        if (op >= FirstImgAttr && op <= (OperatorE)aNull &&
              (pch->pixChan == HUE_CHANNEL ||
               pch->pixChan == SAT_CHANNEL ||
               pch->pixChan == LIGHT_CHANNEL)
           )
        {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Can't have image attribute with HLS qualifier at", "'%s'",
            SetShortExp(pfx));
          return NO_CHAN_QUAL;
        }

        pfx->pex += pfx->lenToken;
        return pch->pixChan;
      }
      pch++;
    }
  }
  return NO_CHAN_QUAL;
}

/* Match the current token against the image-attribute table; on a match,
   consume it, flag NeedStats if required, and parse any .x/.y/.width/.height
   suffix. */
static ImgAttrE GetImgAttrToken (FxInfo * pfx)
{
  ImgAttrE ia = aNull;
  const char * iaStr;
  for (ia = FirstImgAttr; ia < aNull; ia=(ImgAttrE) (ia+1)) {
    iaStr = ImgAttrs[ia-FirstImgAttr].str;
    if (LocaleCompare (iaStr, pfx->token)==0) {
      pfx->pex += strlen(pfx->token);
      if (ImgAttrs[ia-FirstImgAttr].NeedStats == 1) pfx->NeedStats = MagickTrue;
      MaybeXYWH (pfx, &ia);
      break;
    }
  }
if (ia == aPage || ia == aPrintsize || ia == aRes) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Attribute", "'%s' needs qualifier at '%s'", iaStr, SetShortExp(pfx)); } return ia; } static ImgAttrE GetImgAttrQualifier (FxInfo * pfx, int op) { ImgAttrE ia = aNull; if (op == (OperatorE)fU || op == (OperatorE)fV || op == (OperatorE)fP || op == (OperatorE)fS) { (void) GetToken (pfx); if (pfx->lenToken == 0) { return aNull; } ia = GetImgAttrToken (pfx); } return ia; } static MagickBooleanType IsQualifier (FxInfo * pfx) { if (PeekChar (pfx) == '.') { pfx->pex++; return MagickTrue; } return MagickFalse; } static ssize_t GetProperty (FxInfo * pfx, fxFltType *val) /* returns number of character to swallow. "-1" means invalid input "0" means no relevant input (don't swallow, but not an error) */ { if (PeekStr (pfx, "%[")) { int level = 0; size_t len; char sProperty [MagickPathExtent]; char * p = pfx->pex + 2; while (*p) { if (*p == '[') level++; else if (*p == ']') { if (level == 0) break; level--; } p++; } if (!*p || level != 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "After '%[' expected ']' at", "'%s'", SetShortExp(pfx)); return -1; } len = (size_t) (p - pfx->pex + 1); if (len > MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Too much text between '%[' and ']' at", "'%s'", SetShortExp(pfx)); return -1; } (void) CopyMagickString (sProperty, pfx->pex, len+1); sProperty[len] = '\0'; { char * tailptr; char * text; text = InterpretImageProperties (pfx->image->image_info, pfx->image, sProperty, pfx->exception); if (!text || !*text) { text = DestroyString(text); (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Unknown property", "'%s' at '%s'", sProperty, SetShortExp(pfx)); return -1; } *val = strtold (text, &tailptr); if (text == tailptr) { text = DestroyString(text); (void) ThrowMagickException ( pfx->exception, 
GetMagickModule(), OptionError, "Property", "'%s' text '%s' is not a number at '%s'", sProperty, text, SetShortExp(pfx)); return -1; } text = DestroyString(text); } return ((ssize_t) len); } return 0; } static ssize_t inline GetConstantColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2) /* Finds named colour such as "blue" and colorspace function such as "lab(10,20,30)". Returns number of characters to swallow. Return -1 means apparantly a constant colour, but with an error. Return 0 means not a constant colour, but not an error. */ { PixelInfo colour; ExceptionInfo *dummy_exception = AcquireExceptionInfo (); char *p; MagickBooleanType IsGray, IsIcc, IsDev; char ColSp[MagickPathExtent]; (void) CopyMagickString (ColSp, pfx->token, MaxTokenLen); p = ColSp + pfx->lenToken - 1; if (*p == 'a' || *p == 'A') *p = '\0'; (void) GetPixelInfo (pfx->image, &colour); /* "gray" is both a colorspace and a named colour. */ IsGray = (LocaleCompare (ColSp, "gray") == 0) ? MagickTrue : MagickFalse; IsIcc = (LocaleCompare (ColSp, "icc-color") == 0) ? MagickTrue : MagickFalse; IsDev = (LocaleNCompare (ColSp, "device-", 7) == 0) ? MagickTrue : MagickFalse; /* QueryColorCompliance will raise a warning if it isn't a colour, so we discard any exceptions. 
*/
  /* Try the token as a plain named colour first; fall back to a colorspace
     function form "name(...)" if that fails (or the name was "gray", which
     is ambiguous). */
  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, dummy_exception) || IsGray) {
    ssize_t type = ParseCommandOption (MagickColorspaceOptions, MagickFalse, ColSp);
    if (type >= 0 || IsIcc || IsDev) {
      char * q = pfx->pex + pfx->lenToken;
      while (isspace((int) ((unsigned char) *q))) q++;
      if (*q == '(') {
        size_t lenfun;
        char sFunc[MagickPathExtent];
        /* Scan to the closing ')' so the whole "name(...)" text can be handed
           to QueryColorCompliance. */
        while (*q && *q != ')') q++;
        if (!*q) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "constant color missing ')'", "at '%s'",
            SetShortExp(pfx));
          dummy_exception = DestroyExceptionInfo (dummy_exception);
          return -1;
        }
        lenfun = (size_t) (q - pfx->pex + 1);
        if (lenfun > MaxTokenLen) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "lenfun too long", "'%lu' at '%s'",
            lenfun, SetShortExp(pfx));
          dummy_exception = DestroyExceptionInfo (dummy_exception);
          return -1;
        }
        (void) CopyMagickString (sFunc, pfx->pex, lenfun+1);
        if (QueryColorCompliance (sFunc, AllCompliance, &colour, dummy_exception)) {
          *v0 = colour.red   / QuantumRange;
          *v1 = colour.green / QuantumRange;
          *v2 = colour.blue  / QuantumRange;
          dummy_exception = DestroyExceptionInfo (dummy_exception);
          return (ssize_t)lenfun;
        }
      } else {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "colorspace but not a valid color with '(...)' at", "'%s'",
          SetShortExp(pfx));
        dummy_exception = DestroyExceptionInfo (dummy_exception);
        return -1;
      }
    }
    if (!IsGray) {
      dummy_exception = DestroyExceptionInfo (dummy_exception);
      return 0;
    }
  }

  *v0 = colour.red   / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue  / QuantumRange;

  dummy_exception = DestroyExceptionInfo (dummy_exception);
  return (ssize_t)strlen (pfx->token);
}

static ssize_t inline GetHexColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Returns number of characters to swallow.
   Negative return means it starts with '#', but invalid hex number.
*/
{
  char * p;
  size_t len;
  PixelInfo colour;

  if (*pfx->pex != '#') return 0;

  /* find end of hex digits. */
  p = pfx->pex + 1;
  while (isxdigit ((int)*p)) p++;
  if (isalpha ((int)*p)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bad hex number at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }

  len = (size_t) (p - pfx->pex);
  if (len < 1) return 0;
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Hex colour too long at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  (void) CopyMagickString (pfx->token, pfx->pex, len+1);

  (void) GetPixelInfo (pfx->image, &colour);

  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, pfx->exception)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "QueryColorCompliance rejected", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return -1;
  }

  *v0 = colour.red   / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue  / QuantumRange;

  return (ssize_t) len;
}

/* Translate a function call (or the function-like tokens p/u/v/s) into RPN.
   Determines the bracket style ((), [] or {}) and the expected argument
   count, then translates each argument, inserting the control elements that
   implement do/for/if/while branching. */
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe)
{
  /* A function, so get open-parens, n args, close-parens */
  const char * funStr = Functions[fe-FirstFunc].str;
  int nArgs = Functions[fe-FirstFunc].nArgs;
  char chLimit = ')';
  char expChLimit = ')';
  const char *strLimit = ",)";
  OperatorE pushOp = oOpenParen;

  char * pExpStart;

  int lenExp = 0;

  int FndArgs = 0;
  int ndx0 = NULL_ADDRESS, ndx1 = NULL_ADDRESS, ndx2 = NULL_ADDRESS, ndx3 = NULL_ADDRESS;

  MagickBooleanType coordQual = MagickFalse;
  PixelChannel chQual = NO_CHAN_QUAL;
  ImgAttrE iaQual = aNull;

  pfx->pex += pfx->lenToken;

  /* p can be written p{x,y} (absolute) or p[x,y] (relative) or bare. */
  if (fe == fP) {
    char p = PeekChar (pfx);
    if (p=='{') {
      (void) ExpectChar (pfx, '{');
      pushOp = oOpenBrace;
      strLimit = ",}";
      chLimit = '}';
      expChLimit = '}';
    } else if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fU) {
    char p = PeekChar (pfx);
if (p=='[') { (void) ExpectChar (pfx, '['); pushOp = oOpenBracket; strLimit = ",]"; chLimit = ']'; expChLimit = ']'; } else { nArgs = 0; chLimit = ']'; expChLimit = ']'; } } else if (fe == fV || fe == fS) { nArgs = 0; pushOp = oOpenBracket; chLimit = ']'; expChLimit = ']'; } else { if (!ExpectChar (pfx, '(')) return MagickFalse; } if (!PushOperatorStack (pfx, pushOp)) return MagickFalse; pExpStart = pfx->pex; ndx0 = pfx->usedElements; if (fe==fDo) { (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx1+1 */ } while (nArgs > 0) { int FndOne = 0; if (TranslateStatementList (pfx, strLimit, &chLimit)) { FndOne = 1; } else { /* Maybe don't break because other expressions may be not empty. */ if (!chLimit) break; if (fe == fP || fe == fS|| fe == fIf) { (void) AddElement (pfx, (fxFltType) 0, oNull); FndOne = 1; } } if (strchr (strLimit, chLimit)==NULL) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected one of '%s' after expression but found '%c' at '%s'", funStr, strLimit, chLimit ? 
chLimit : ' ', SetShortExp(pfx)); return MagickFalse; } if (FndOne) { FndArgs++; nArgs--; } switch (FndArgs) { case 1: if (ndx1 != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' required argument is missing at '%s'", funStr, SetShortExp(pfx)); return MagickFalse; } ndx1 = pfx->usedElements; if (fe==fWhile) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */ } else if (fe==fDo) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */ } else if (fe==fFor) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; } else if (fe==fIf) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2 + 1 */ pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from if() */ } break; case 2: if (ndx2 != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' required argument is missing at '%s'", funStr, SetShortExp(pfx)); return MagickFalse; } ndx2 = pfx->usedElements; if (fe==fWhile) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; (void) AddAddressingElement (pfx, rGoto, ndx0); } else if (fe==fDo) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; (void) AddAddressingElement (pfx, rGoto, ndx0 + 1); } else if (fe==fFor) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx3 */ pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from for() */ (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS); } else if (fe==fIf) { (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx3 */ } break; case 3: if (ndx3 != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' required argument is missing at '%s'", funStr, SetShortExp(pfx)); return MagickFalse; } if (fe==fFor) { 
pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; (void) AddAddressingElement (pfx, rGoto, ndx1); } ndx3 = pfx->usedElements; break; default: break; } if (chLimit == expChLimit) { lenExp = pfx->pex - pExpStart - 1; break; } } /* end while args of a function */ if (chLimit && chLimit != expChLimit && chLimit != ',' ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected '%c', found '%c' at '%s'", funStr, expChLimit, chLimit ? chLimit : ' ', SetShortExp(pfx)); return MagickFalse; } if (fe == fP || fe == fS || fe == fU) { while (FndArgs < Functions[fe-FirstFunc].nArgs) { (void) AddElement (pfx, (fxFltType) 0, oNull); FndArgs++; } } if (FndArgs > Functions[fe-FirstFunc].nArgs) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected %i arguments, found '%i' at '%s'", funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx)); return MagickFalse; } if (FndArgs < Functions[fe-FirstFunc].nArgs) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected %i arguments, found too few (%i) at '%s'", funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx)); return MagickFalse; } if (fe != fS && fe != fV && FndArgs == 0 && Functions[fe-FirstFunc].nArgs == 0) { /* This is for "rand()" and similar. 
*/ chLimit = expChLimit; if (!ExpectChar (pfx, ')')) return MagickFalse; } if (chLimit != expChLimit) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', arguments don't end with '%c' at '%s'", funStr, expChLimit, SetShortExp(pfx)); return MagickFalse; } if (!PopOprOpenParen (pfx, pushOp)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bug: For function", "'%s' tos not '%s' at '%s'", funStr, Operators[pushOp].str, SetShortExp(pfx)); return MagickFalse; } if (IsQualifier (pfx)) { if (fe == fU || fe == fV || fe == fS) { coordQual = (GetCoordQualifier (pfx, fe) == 1) ? MagickTrue : MagickFalse; if (coordQual) { /* Remove last element, which should be fP */ ElementT * pel = &pfx->Elements[pfx->usedElements-1]; if (pel->oprNum != fP) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bug: For function", "'%s' last element not 'p' at '%s'", funStr, SetShortExp(pfx)); return MagickFalse; } chQual = pel->ChannelQual; expChLimit = (pel->IsRelative) ? ']' : '}'; pfx->usedElements--; if (fe == fU) fe = fUP; else if (fe == fV) fe = fVP; else if (fe == fS) fe = fSP; funStr = Functions[fe-FirstFunc].str; } } if ( chQual == NO_CHAN_QUAL && (fe == fP || fe == fS || fe == fSP || fe == fU || fe == fUP || fe == fV || fe == fVP) ) { chQual = GetChannelQualifier (pfx, fe); } if (chQual == NO_CHAN_QUAL && (fe == fU || fe == fV || fe == fS)) { /* Note: we don't allow "p.mean" etc. 
*/ iaQual = GetImgAttrQualifier (pfx, fe); } if (IsQualifier (pfx) && chQual == NO_CHAN_QUAL && iaQual != aNull) { chQual = GetChannelQualifier (pfx, fe); } if (coordQual && iaQual != aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', can't have qualifiers 'p' and image attribute '%s' at '%s'", funStr, pfx->token, SetShortExp(pfx)); return MagickFalse; } if (!coordQual && chQual == NO_CHAN_QUAL && iaQual == aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', bad qualifier '%s' at '%s'", funStr, pfx->token, SetShortExp(pfx)); return MagickFalse; } if (!coordQual && chQual == CompositePixelChannel && iaQual == aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', bad composite qualifier '%s' at '%s'", funStr, pfx->token, SetShortExp(pfx)); return MagickFalse; } if (chQual == HUE_CHANNEL || chQual == SAT_CHANNEL || chQual == LIGHT_CHANNEL) { pfx->NeedHsl = MagickTrue; if (iaQual >= FirstImgAttr && iaQual < aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Can't have image attribute with HLS qualifier at", "'%s'", SetShortExp(pfx)); return MagickFalse; } } } if (fe==fWhile) { pfx->Elements[ndx1].EleNdx = ndx2+1; } else if (fe==fDo) { pfx->Elements[ndx0].EleNdx = ndx1+1; pfx->Elements[ndx1].EleNdx = ndx2+1; } else if (fe==fFor) { pfx->Elements[ndx2].EleNdx = ndx3; } else if (fe==fIf) { pfx->Elements[ndx1].EleNdx = ndx2 + 1; pfx->Elements[ndx2].EleNdx = ndx3; } else { if (fe == fU && iaQual == aNull) { ElementT * pel = &pfx->Elements[pfx->usedElements-1]; if (pel->type == etConstant && pel->val == 0.0) { pfx->usedElements--; fe = fU0; } } (void) AddElement (pfx, (fxFltType) 0, fe); if (fe == fP || fe == fU || fe == fU0 || fe == fUP || fe == fV || fe == fVP || fe == fS || fe == fSP) { ElementT * pel = &pfx->Elements[pfx->usedElements-1]; pel->IsRelative = 
(expChLimit == ']' ? MagickTrue : MagickFalse); if (chQual >= 0) pel->ChannelQual = chQual; if (iaQual != aNull && (fe == fU || fe == fV || fe == fS)) { /* Note: we don't allow "p[2,3].mean" or "p.mean" etc. */ pel->ImgAttrQual = iaQual; } } } if (pExpStart && lenExp) { ElementT * pel = &pfx->Elements[pfx->usedElements-1]; pel->pExpStart = pExpStart; pel->lenExp = lenExp; } if (fe == fDebug) pfx->ContainsDebug = MagickTrue; return MagickTrue; } static MagickBooleanType IsStealth (int op) { return (op == fU0 || op == fUP || op == fSP || op == fVP || (op >= FirstCont && op <= rNull) ? MagickTrue : MagickFalse ); } static MagickBooleanType GetOperand ( FxInfo * pfx, MagickBooleanType * UserSymbol, MagickBooleanType * NewUserSymbol, int * UserSymNdx, MagickBooleanType * needPopAll) { *NewUserSymbol = *UserSymbol = MagickFalse; *UserSymNdx = NULL_ADDRESS; SkipSpaces (pfx); if (!*pfx->pex) return MagickFalse; (void) GetToken (pfx); if (pfx->lenToken==0) { /* Try '(' or unary prefix */ OperatorE op = GetLeadingOp (pfx); if (op==oOpenParen) { char chLimit = '\0'; if (!PushOperatorStack (pfx, op)) return MagickFalse; pfx->pex++; if (!TranslateExpression (pfx, ")", &chLimit, needPopAll)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Empty expression in parentheses at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (chLimit != ')') { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "'(' but no ')' at", "'%s'", SetShortExp(pfx)); return MagickFalse; } /* Top of opr stack should be '('. 
*/ if (!PopOprOpenParen (pfx, oOpenParen)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bug: tos not '(' at", "'%s'", SetShortExp(pfx)); return MagickFalse; } return MagickTrue; } else if (OprIsUnaryPrefix (op)) { if (!PushOperatorStack (pfx, op)) return MagickFalse; pfx->pex++; SkipSpaces (pfx); if (!*pfx->pex) return MagickFalse; if (!GetOperand (pfx, UserSymbol, NewUserSymbol, UserSymNdx, needPopAll)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "After unary, bad operand at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (*NewUserSymbol) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "After unary, NewUserSymbol at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (*UserSymbol) { (void) AddAddressingElement (pfx, rCopyFrom, *UserSymNdx); *UserSymNdx = NULL_ADDRESS; *UserSymbol = MagickFalse; *NewUserSymbol = MagickFalse; } (void) GetToken (pfx); return MagickTrue; } else if (*pfx->pex == '#') { fxFltType v0=0, v1=0, v2=0; ssize_t lenToken = GetHexColour (pfx, &v0, &v1, &v2); if (lenToken < 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bad hex number at", "'%s'", SetShortExp(pfx)); return MagickFalse; } else if (lenToken > 0) { (void) AddColourElement (pfx, v0, v1, v2); pfx->pex+=lenToken; } return MagickTrue; } /* Try a constant number. */ { char * tailptr; ssize_t lenOptArt; fxFltType val = strtold (pfx->pex, &tailptr); if (pfx->pex != tailptr) { pfx->pex = tailptr; if (*tailptr) { /* Could have "prefix" K, Ki, M etc. 
See https://en.wikipedia.org/wiki/Metric_prefix and https://en.wikipedia.org/wiki/Binary_prefix */ double Pow = 0.0; const char Prefices[] = "yzafpnum.kMGTPEZY"; const char * pSi = strchr (Prefices, *tailptr); if (pSi && *pSi != '.') Pow = (pSi - Prefices) * 3 - 24; else if (*tailptr == 'c') Pow = -2; else if (*tailptr == 'h') Pow = 2; else if (*tailptr == 'k') Pow = 3; if (Pow != 0.0) { if (*(++pfx->pex) == 'i') { val *= pow (2.0, Pow/0.3); pfx->pex++; } else { val *= pow (10.0, Pow); } } } (void) AddElement (pfx, val, oNull); return MagickTrue; } val = (fxFltType) 0; lenOptArt = GetProperty (pfx, &val); if (lenOptArt < 0) return MagickFalse; if (lenOptArt > 0) { (void) AddElement (pfx, val, oNull); pfx->pex += lenOptArt; return MagickTrue; } } } /* end of lenToken==0 */ if (pfx->lenToken > 0) { /* Try a constant */ { ConstantE ce; for (ce = (ConstantE)0; ce < cNull; ce=(ConstantE) (ce+1)) { const char * ceStr = Constants[ce].str; if (LocaleCompare (ceStr, pfx->token)==0) { break; } } if (ce != cNull) { (void) AddElement (pfx, Constants[ce].val, oNull); pfx->pex += pfx->lenToken; return MagickTrue; } } /* Try a function */ { FunctionE fe; for (fe = FirstFunc; fe < fNull; fe=(FunctionE) (fe+1)) { const char * feStr = Functions[fe-FirstFunc].str; if (LocaleCompare (feStr, pfx->token)==0) { break; } } if (fe == fV && pfx->ImgListLen < 2) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Symbol 'v' but fewer than two images at", "'%s'", SetShortExp(pfx)); return MagickFalse; }
/* NOTE(review): this throws but does not return, so parsing continues after a
   stealth-function name is rejected — verify that is intentional. */
if (IsStealth (fe)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Function", "'%s' not permitted at '%s'", pfx->token, SetShortExp(pfx)); } if (fe == fDo || fe == fFor || fe == fIf || fe == fWhile) { *needPopAll = MagickTrue; } if (fe != fNull) return (GetFunction (pfx, fe)); } /* Try image attribute */ { ImgAttrE ia = GetImgAttrToken (pfx); if (ia != aNull) { fxFltType val = 0; (void) AddElement (pfx, val, ia); if 
(ImgAttrs[ia-FirstImgAttr].NeedStats==1) { if (IsQualifier (pfx)) { PixelChannel chQual = GetChannelQualifier (pfx, ia); ElementT * pel; if (chQual == NO_CHAN_QUAL) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bad channel qualifier at", "'%s'", SetShortExp(pfx)); return MagickFalse; } /* Adjust the element */ pel = &pfx->Elements[pfx->usedElements-1]; pel->ChannelQual = chQual; } } return MagickTrue; } } /* Try symbol */ { SymbolE se; for (se = FirstSym; se < sNull; se=(SymbolE) (se+1)) { const char * seStr = Symbols[se-FirstSym].str; if (LocaleCompare (seStr, pfx->token)==0) { break; } } if (se != sNull) { fxFltType val = 0; (void) AddElement (pfx, val, se); pfx->pex += pfx->lenToken; if (se==sHue || se==sSaturation || se==sLightness) pfx->NeedHsl = MagickTrue; return MagickTrue; } } /* Try constant colour. */ { fxFltType v0, v1, v2; ssize_t ColLen = GetConstantColour (pfx, &v0, &v1, &v2); if (ColLen < 0) return MagickFalse; if (ColLen > 0) { (void) AddColourElement (pfx, v0, v1, v2); pfx->pex+=ColLen; return MagickTrue; } } /* Try image artifact. */ { const char *artifact; artifact = GetImageArtifact (pfx->image, pfx->token); if (artifact != (const char *) NULL) { char * tailptr; fxFltType val = strtold (artifact, &tailptr); if (pfx->token == tailptr) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Artifact", "'%s' has value '%s', not a number, at '%s'", pfx->token, artifact, SetShortExp(pfx)); return MagickFalse; } (void) AddElement (pfx, val, oNull); pfx->pex+=pfx->lenToken; return MagickTrue; } } /* Try user symbols. If it is, don't AddElement yet. 
*/ if (TokenMaybeUserSymbol (pfx)) { *UserSymbol = MagickTrue; *UserSymNdx = FindUserSymbol (pfx, pfx->token); if (*UserSymNdx == NULL_ADDRESS) { *UserSymNdx = AddUserSymbol (pfx, pfx->pex, pfx->lenToken); *NewUserSymbol = MagickTrue; } else { } pfx->pex += pfx->lenToken; return MagickTrue; } } (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected operand at", "'%s'", SetShortExp(pfx)); return MagickFalse; }
/* True unless op is a grouping pseudo-operator (parens/brackets/braces). */
static MagickBooleanType inline IsRealOperator (OperatorE op) { return (op < oOpenParen || op > oCloseBrace) ? MagickTrue : MagickFalse; }
/* NOTE(review): also returns MagickFalse when the operator stack is empty,
   without raising an exception — the comment below overstates the contract. */
static MagickBooleanType inline ProcessTernaryOpr (FxInfo * pfx, TernaryT * ptern) /* Ternary operator "... ? ... : ..." returns false iff we have exception */ { if (pfx->usedOprStack == 0) return MagickFalse; if (pfx->OperatorStack[pfx->usedOprStack-1] == oQuery) { if (ptern->addrQuery != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Already have '?' in sub-expression at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (ptern->addrColon != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Already have ':' in sub-expression at", "'%s'", SetShortExp(pfx)); return MagickFalse; } pfx->usedOprStack--; ptern->addrQuery = pfx->usedElements; (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be one after the Colon address. */ } else if (pfx->OperatorStack[pfx->usedOprStack-1] == oColon) { if (ptern->addrQuery == NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Need '?' in sub-expression at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (ptern->addrColon != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Already have ':' in sub-expression at", "'%s'", SetShortExp(pfx)); return MagickFalse; } pfx->usedOprStack--; ptern->addrColon = pfx->usedElements; pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be after the subexpression */ } return MagickTrue; }
/* Parse one binary/assignment operator at pex; reports via the out-params whether it
   is '=' (Assign), an in-place update such as '+=' (Update), or '++'/'--' (IncrDecr).
   Pops operators of higher precedence off the stack to the element list first
   (left-associative), then pushes the new operator (close-paren pops instead). */
static MagickBooleanType GetOperator ( FxInfo * pfx, MagickBooleanType * Assign, MagickBooleanType * Update, MagickBooleanType * IncrDecr) { OperatorE op; size_t len = 0; MagickBooleanType DoneIt = MagickFalse; SkipSpaces (pfx); for (op = (OperatorE)0; op != oNull; op=(OperatorE) (op+1)) { const char * opStr = Operators[op].str; len = strlen(opStr); if (LocaleNCompare (opStr, pfx->pex, len)==0) { break; } } if (!IsRealOperator (op)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Not a real operator at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (op==oNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected operator at", "'%s'", SetShortExp(pfx)); return MagickFalse; } *Assign = (op==oAssign) ? MagickTrue : MagickFalse; *Update = OprInPlace (op); *IncrDecr = (op == oPlusPlus || op == oSubSub) ? MagickTrue : MagickFalse; /* while top of OperatorStack is not empty and is not open-parens or assign, and top of OperatorStack is higher precedence than new op, then move top of OperatorStack to Element list. */ while (pfx->usedOprStack > 0) { OperatorE top = pfx->OperatorStack[pfx->usedOprStack-1]; int precTop, precNew; if (top == oOpenParen || top == oAssign || OprInPlace (top)) break; precTop = Operators[top].precedence; precNew = Operators[op].precedence; /* Assume left associativity. If right assoc, this would be "<=". 
*/ if (precTop < precNew) break; (void) AddElement (pfx, (fxFltType) 0, top); pfx->usedOprStack--; } /* If new op is close paren, and stack top is open paren, remove stack top. */ if (op==oCloseParen) { if (pfx->usedOprStack == 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Found ')' but nothing on stack at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (pfx->OperatorStack[pfx->usedOprStack-1] != oOpenParen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Found ')' but no '(' on stack at", "'%s'", SetShortExp(pfx)); return MagickFalse; } pfx->usedOprStack--; DoneIt = MagickTrue; } if (!DoneIt) { if (!PushOperatorStack (pfx, op)) return MagickFalse; } pfx->pex += len; return MagickTrue; } static MagickBooleanType ResolveTernaryAddresses (FxInfo * pfx, TernaryT * ptern) { if (ptern->addrQuery == NULL_ADDRESS && ptern->addrColon == NULL_ADDRESS) return MagickTrue; if (ptern->addrQuery != NULL_ADDRESS && ptern->addrColon != NULL_ADDRESS) { pfx->Elements[ptern->addrQuery].EleNdx = ptern->addrColon + 1; pfx->Elements[ptern->addrColon].EleNdx = pfx->usedElements; ptern->addrQuery = NULL_ADDRESS; ptern->addrColon = NULL_ADDRESS; } else if (ptern->addrQuery != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "'?' with no corresponding ':'", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } else if (ptern->addrColon != NULL_ADDRESS) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "':' with no corresponding '?'", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } return MagickTrue; } static MagickBooleanType TranslateExpression ( FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll) { /* There should be only one New per expression (oAssign), but can be many Old. 
*/ MagickBooleanType UserSymbol, NewUserSymbol; int UserSymNdx0, UserSymNdx1; MagickBooleanType Assign = MagickFalse, Update = MagickFalse, IncrDecr = MagickFalse; int StartEleNdx; TernaryT ternary; ternary.addrQuery = NULL_ADDRESS; ternary.addrColon = NULL_ADDRESS; pfx->teDepth++; *chLimit = '\0'; StartEleNdx = pfx->usedElements-1; if (StartEleNdx < 0) StartEleNdx = 0; SkipSpaces (pfx); if (!*pfx->pex) { pfx->teDepth--; return MagickFalse; } if (strchr(strLimit,*pfx->pex)!=NULL) { *chLimit = *pfx->pex; pfx->pex++; pfx->teDepth--; return MagickFalse; } if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx0, needPopAll)) return MagickFalse; SkipSpaces (pfx); /* Loop through Operator, Operand, Operator, Operand, ... */ while (*pfx->pex && (!*strLimit || (strchr(strLimit,*pfx->pex)==NULL))) { if (!GetOperator (pfx, &Assign, &Update, &IncrDecr)) return MagickFalse; SkipSpaces (pfx); if (NewUserSymbol && !Assign) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected assignment after new UserSymbol", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } if (!UserSymbol && Assign) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Attempted assignment to non-UserSymbol", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } if (!UserSymbol && Update) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Attempted update to non-UserSymbol", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } if (UserSymbol && (Assign || Update) && !IncrDecr) { if (!TranslateExpression (pfx, strLimit, chLimit, needPopAll)) return MagickFalse; if (!*pfx->pex) break; if (!*strLimit) break; if (strchr(strLimit,*chLimit)!=NULL) break; } if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) { ElementT * pel; (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0); UserSymNdx0 = NULL_ADDRESS; pel = 
&pfx->Elements[pfx->usedElements-1]; pel->DoPush = MagickTrue; } if (UserSymbol) { while (TopOprIsUnaryPrefix (pfx)) { OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1]; (void) AddElement (pfx, (fxFltType) 0, op); pfx->usedOprStack--; } } if (!ProcessTernaryOpr (pfx, &ternary)) return MagickFalse; if (ternary.addrColon != NULL_ADDRESS) { if (!TranslateExpression (pfx, ",);", chLimit, needPopAll)) return MagickFalse; break; } UserSymbol = NewUserSymbol = MagickFalse; if ( (!*pfx->pex) || (*strLimit && (strchr(strLimit,*pfx->pex)!=NULL) ) ) { if (IncrDecr) break; (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected operand after operator", "at '%s'", SetShortExp(pfx)); return MagickFalse; } if (IncrDecr) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "'++' and '--' must be the final operators in an expression at", "'%s'", SetShortExp(pfx)); return MagickFalse; } if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx1, needPopAll)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected operand at", "'%s'", SetShortExp(pfx)); return MagickFalse; } SkipSpaces (pfx); if (NewUserSymbol && !Assign) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "NewUserSymbol", "'%s' after non-assignment operator at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } if (UserSymbol && !NewUserSymbol) { (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx1); UserSymNdx1 = NULL_ADDRESS; } UserSymNdx0 = UserSymNdx1; } if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) { ElementT * pel; if (NewUserSymbol) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "NewUserSymbol", "'%s' needs assignment operator at '%s'", pfx->token, SetShortExp(pfx)); return MagickFalse; } (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0); pel = &pfx->Elements[pfx->usedElements-1]; pel->DoPush = MagickTrue; 
} if (*pfx->pex && !*chLimit && (strchr(strLimit,*pfx->pex)!=NULL)) { *chLimit = *pfx->pex; pfx->pex++; } while (pfx->usedOprStack) { OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1]; if (op == oOpenParen || op == oOpenBracket || op == oOpenBrace) { break; } if ( (op==oAssign && !Assign) || (OprInPlace(op) && !Update) ) { break; } pfx->usedOprStack--; (void) AddElement (pfx, (fxFltType) 0, op); if (op == oAssign) { if (UserSymNdx0 < 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Assignment to unknown user symbol at", "'%s'", SetShortExp(pfx)); return MagickFalse; } /* Adjust last element, by deletion and add. */ pfx->usedElements--; (void) AddAddressingElement (pfx, rCopyTo, UserSymNdx0); break; } else if (OprInPlace (op)) { if (UserSymNdx0 < 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Operator-in-place to unknown user symbol at", "'%s'", SetShortExp(pfx)); return MagickFalse; } /* Modify latest element. */ pfx->Elements[pfx->usedElements-1].EleNdx = UserSymNdx0; break; } } (void) ResolveTernaryAddresses (pfx, &ternary); pfx->teDepth--; if (!pfx->teDepth && *needPopAll) { (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS); *needPopAll = MagickFalse; } if (pfx->exception->severity != UndefinedException) return MagickFalse; return MagickTrue; }
/* Translate one statement; when terminated by ';' the statement's value is
   discarded by clearing DoPush on its final element. */
static MagickBooleanType TranslateStatement (FxInfo * pfx, char * strLimit, char * chLimit) { MagickBooleanType NeedPopAll = MagickFalse; SkipSpaces (pfx); if (!*pfx->pex) return MagickFalse; if (!TranslateExpression (pfx, strLimit, chLimit, &NeedPopAll)) { return MagickFalse; } if (pfx->usedElements && *chLimit==';') { /* FIXME: not necessarily the last element, but the last _executed_ element, eg "goto" in a "for()"., Pending a fix, we will use rZerStk. 
*/ ElementT * pel = &pfx->Elements[pfx->usedElements-1]; if (pel->DoPush) pel->DoPush = MagickFalse; } return MagickTrue; }
/* Translate a ';'-separated list of statements until a character in strLimit
   (';' is appended to the limit set if absent). */
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit) {
#define MAX_SLIMIT 10
char sLimits[MAX_SLIMIT]; SkipSpaces (pfx); if (!*pfx->pex) return MagickFalse; (void) CopyMagickString (sLimits, strLimit, MAX_SLIMIT-1); if (strchr(strLimit,';')==NULL) (void) ConcatenateMagickString (sLimits, ";", MAX_SLIMIT); for (;;) { if (!TranslateStatement (pfx, sLimits, chLimit)) return MagickFalse; if (!*pfx->pex) break; if (*chLimit != ';') { break; } } if (pfx->exception->severity != UndefinedException) return MagickFalse; return MagickTrue; } /*-------------------------------------------------------------------- Run-time */
/* Collect per-channel statistics for one image, scaling each statistic to
   the 0..1 range via QuantumScale.  Caller owns the returned array. */
static ChannelStatistics *CollectOneImgStats (FxInfo * pfx, Image * img) { int ch; ChannelStatistics * cs = GetImageStatistics (img, pfx->exception); /* Use RelinquishMagickMemory() somewhere. */ for (ch=0; ch <= (int) MaxPixelChannels; ch++) { cs[ch].mean *= QuantumScale; cs[ch].median *= QuantumScale; cs[ch].maxima *= QuantumScale; cs[ch].minima *= QuantumScale; cs[ch].standard_deviation *= QuantumScale; cs[ch].kurtosis *= QuantumScale; cs[ch].skewness *= QuantumScale; cs[ch].entropy *= QuantumScale; } return cs; }
/* Collect statistics for every image in the list into pfx->statistics and
   set pfx->GotStats. */
static MagickBooleanType CollectStatistics (FxInfo * pfx) { Image * img = GetFirstImageInList (pfx->image); size_t imgNum=0; pfx->statistics = (ChannelStatistics**) AcquireMagickMemory (pfx->ImgListLen * sizeof (ChannelStatistics *)); if (!pfx->statistics) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Statistics", "%lu", pfx->ImgListLen); return MagickFalse; } for (;;) { pfx->statistics[imgNum] = CollectOneImgStats (pfx, img); if (++imgNum == pfx->ImgListLen) break; img = GetNextImageInList (img); assert (img != (Image *) NULL); } pfx->GotStats = MagickTrue; return MagickTrue; }
/* Push val onto the run-time value stack; addr is the RPN element index,
   used only in the overflow diagnostic. */
static MagickBooleanType inline PushVal (FxInfo * 
pfx, fxRtT * pfxrt, fxFltType val, int addr) { if (pfxrt->usedValStack >=pfxrt->numValStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ValStack overflow at addr=", "%i", addr); return MagickFalse; } pfxrt->ValStack[pfxrt->usedValStack++] = val; return MagickTrue; }
/* Pop and return the top of the run-time value stack; returns 0 (with
   exception) on underflow. */
static inline fxFltType PopVal (FxInfo * pfx, fxRtT * pfxrt, int addr) { if (pfxrt->usedValStack <= 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ValStack underflow at addr=", "%i", addr); return (fxFltType) 0; } return pfxrt->ValStack[--pfxrt->usedValStack]; }
/* Return an image attribute or per-channel statistic for image ImgNum.
   Statistics come from the cached table when GotStats, otherwise are
   collected on demand and released before returning. */
static inline fxFltType ImageStat ( FxInfo * pfx, ssize_t ImgNum, PixelChannel channel, ImgAttrE ia) { ChannelStatistics * cs = NULL; fxFltType ret = 0; MagickBooleanType NeedRelinq = MagickFalse; assert (channel >= 0 && channel <= MaxPixelChannels); if (pfx->GotStats) { cs = pfx->statistics[ImgNum]; } else if (pfx->NeedStats) { /* If we need more than one statistic per pixel, this is inefficient. 
*/ cs = CollectOneImgStats (pfx, pfx->Images[ImgNum]); NeedRelinq = MagickTrue; } switch (ia) { case aDepth: ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception); break; case aExtent: ret = (fxFltType) GetBlobSize (pfx->image); break; case aKurtosis: ret = cs[channel].kurtosis; break; case aMaxima: ret = cs[channel].maxima; break; case aMean: ret = cs[channel].mean; break; case aMedian: ret = cs[channel].median; break; case aMinima: ret = cs[channel].minima; break; case aPage: /* Do nothing */ break; case aPageX: ret = (fxFltType) pfx->Images[ImgNum]->page.x; break; case aPageY: ret = (fxFltType) pfx->Images[ImgNum]->page.y; break; case aPageWid: ret = (fxFltType) pfx->Images[ImgNum]->page.width; break; case aPageHt: ret = (fxFltType) pfx->Images[ImgNum]->page.height; break; case aPrintsize: /* Do nothing */ break; case aPrintsizeX: ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.x) * pfx->Images[ImgNum]->columns; break; case aPrintsizeY: ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.y) * pfx->Images[ImgNum]->rows; break; case aQuality: ret = (fxFltType) pfx->Images[ImgNum]->quality; break; case aRes: /* Do nothing */ break; case aResX: ret = pfx->Images[ImgNum]->resolution.x; break; case aResY: ret = pfx->Images[ImgNum]->resolution.y; break; case aSkewness: ret = cs[channel].skewness; break; case aStdDev: ret = cs[channel].standard_deviation; break; case aH: ret = (fxFltType) pfx->Images[ImgNum]->rows; break; case aN: ret = (fxFltType) pfx->ImgListLen; break; case aT: /* image index in list */ ret = (fxFltType) ImgNum; break; case aW: ret = (fxFltType) pfx->Images[ImgNum]->columns; break; case aZ: ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception); break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Unknown ia=", "%i", ia); } if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs); return ret; }
/* Approximate greatest common divisor by Euclid's method; recursion is
   depth-limited and terminates when |y| falls below 0.001. */
static fxFltType inline 
FxGcd (fxFltType x, fxFltType y, const size_t depth) {
#define FxMaxFunctionDepth 200
if (x < y) return (FxGcd (y, x, depth+1)); if ((fabs((double) y) < 0.001) || (depth >= FxMaxFunctionDepth)) return (x); return (FxGcd (y, x-y*floor((double) (x/y)), depth+1)); }
/* Round f to an image-list index, allowing negative indices to count from the
   end; returns -1 (with exception) when out of range.
   NOTE(review): the "%lu" specifiers are paired with ssize_t/size_t arguments,
   which mismatches on some platforms — consider "%td"/"%zu" or casts. */
static ssize_t inline ChkImgNum (FxInfo * pfx, fxFltType f) /* Returns -1 if f is too large. */ { ssize_t i = (ssize_t) floor ((double) f + 0.5); if (i < 0) i += pfx->ImgListLen; if (i < 0 || i >= (ssize_t)pfx->ImgListLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ImgNum", "%lu bad for ImgListLen %lu", i, pfx->ImgListLen); i = -1; } return i; }
/* Map an element's ChannelQual to the channel actually read, for attribute
   (statistic) lookups and for direct pixel reads respectively. */
#define WHICH_ATTR_CHAN \
 (pel->ChannelQual == NO_CHAN_QUAL) ? CompositePixelChannel : \
 (pel->ChannelQual == THIS_CHANNEL) ? channel : pel->ChannelQual
#define WHICH_NON_ATTR_CHAN \
 (pel->ChannelQual == NO_CHAN_QUAL || \
 pel->ChannelQual == THIS_CHANNEL || \
 pel->ChannelQual == CompositePixelChannel \
 ) ? (channel == CompositePixelChannel ? RedPixelChannel: channel) \
 : pel->ChannelQual
/* Interpolated H/S/L value at floating-point coordinates (fx,fy) of image ImgNum. */
static fxFltType GetHslFlt (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy, int channel) { Image * img = pfx->Images[ImgNum]; double red, green, blue; double hue=0, saturation=0, lightness=0; MagickBooleanType okay = MagickTrue; if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, RedPixelChannel, img->interpolate, (double) fx, (double) fy, &red, pfx->exception)) okay = MagickFalse; if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, GreenPixelChannel, img->interpolate, (double) fx, (double) fy, &green, pfx->exception)) okay = MagickFalse; if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, BluePixelChannel, img->interpolate, (double) fx, (double) fy, &blue, pfx->exception)) okay = MagickFalse; if (!okay) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetHslFlt failure", "%lu %Lg,%Lg %i", ImgNum, fx, fy, channel); ConvertRGBToHSL ( red, green, blue, &hue, &saturation, 
&lightness); if (channel == HUE_CHANNEL) return hue; if (channel == SAT_CHANNEL) return saturation; if (channel == LIGHT_CHANNEL) return lightness; return 0.0; }
/* H/S/L value at integer pixel coordinates (imgx,imgy) of image ImgNum. */
static fxFltType GetHslInt (FxInfo * pfx, ssize_t ImgNum, const ssize_t imgx, const ssize_t imgy, int channel) { Image * img = pfx->Images[ImgNum]; double hue=0, saturation=0, lightness=0; const Quantum * p = GetCacheViewVirtualPixels (pfx->Imgs[ImgNum].View, imgx, imgy, 1, 1, pfx->exception); if (!p) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetHslInt failure", "%lu %li,%li %i", ImgNum, imgx, imgy, channel); ConvertRGBToHSL ( GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p), &hue, &saturation, &lightness); if (channel == HUE_CHANNEL) return hue; if (channel == SAT_CHANNEL) return saturation; if (channel == LIGHT_CHANNEL) return lightness; return 0.0; }
/* Interpolated pixel intensity (0..1) at (fx,fy) of image ImgNum. */
static fxFltType inline GetIntensity (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy) { Quantum quantum_pixel[MaxPixelChannels]; PixelInfo pixelinf; Image * img = pfx->Images[ImgNum]; (void) GetPixelInfo (img, &pixelinf);
/* NOTE(review): the view is taken from pfx->ImgNum, not the ImgNum parameter,
   while img comes from the parameter — verify this mismatch is intentional. */
if (!InterpolatePixelInfo (img, pfx->Imgs[pfx->ImgNum].View, img->interpolate, (double) fx, (double) fy, &pixelinf, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetIntensity failure", "%lu %Lg,%Lg", ImgNum, fx, fy); } SetPixelViaPixelInfo (img, &pixelinf, quantum_pixel); return QuantumScale * GetPixelIntensity (img, quantum_pixel); }
/* Execute the compiled RPN element list to produce one result value for pixel
   (imgx,imgy) on the given channel. */
static MagickBooleanType ExecuteRPN (FxInfo * pfx, fxRtT * pfxrt, fxFltType *result, const PixelChannel channel, const ssize_t imgx, const ssize_t imgy) { const Quantum * p = pfxrt->thisPixel; fxFltType regA=0, regB=0, regC=0, regD=0, regE=0; Image * img = pfx->image; ChannelStatistics * cs = NULL; MagickBooleanType NeedRelinq = MagickFalse; double hue=0, saturation=0, lightness=0; int i; /* For -fx, this sets p to ImgNum 0. for %[fx:...], this sets p to the current image. 
Similarly img. */ if (!p) p = GetCacheViewVirtualPixels ( pfx->Imgs[pfx->ImgNum].View, imgx, imgy, 1, 1, pfx->exception); if (pfx->GotStats) { cs = pfx->statistics[pfx->ImgNum]; } else if (pfx->NeedStats) { cs = CollectOneImgStats (pfx, pfx->Images[pfx->ImgNum]); NeedRelinq = MagickTrue; } /* Folllowing is only for expressions like "saturation", with no image specifier. */ if (pfx->NeedHsl) { ConvertRGBToHSL ( GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p), &hue, &saturation, &lightness); } for (i=0; i < pfx->usedElements; i++) { ElementT *pel = &pfx->Elements[i]; switch (pel->nArgs) { case 0: break; case 1: regA = PopVal (pfx, pfxrt, i); break; case 2: regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 3: regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 4: regD = PopVal (pfx, pfxrt, i); regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 5: regE = PopVal (pfx, pfxrt, i); regD = PopVal (pfx, pfxrt, i); regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Too many args:", "%i", pel->nArgs); break; } switch (pel->oprNum) { case oAddEq: regA = (pfxrt->UserSymVals[pel->EleNdx] += regA); break; case oSubtractEq: regA = (pfxrt->UserSymVals[pel->EleNdx] -= regA); break; case oMultiplyEq: regA = (pfxrt->UserSymVals[pel->EleNdx] *= regA); break; case oDivideEq: regA = (pfxrt->UserSymVals[pel->EleNdx] *= PerceptibleReciprocal((double)regA)); break; case oPlusPlus: regA = pfxrt->UserSymVals[pel->EleNdx]++; break; case oSubSub: regA = pfxrt->UserSymVals[pel->EleNdx]--; break; case oAdd: regA += regB; break; case oSubtract: regA -= regB; break; case oMultiply: regA *= regB; break; case oDivide: regA *= PerceptibleReciprocal((double)regB); break; case oModulus: regA = fmod ((double) regA, 
fabs(floor((double) regB+0.5))); break; case oUnaryPlus: /* Do nothing. */ break; case oUnaryMinus: regA = -regA; break; case oLshift: if ((size_t) (regB+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "undefined shift", "%g", (double) regB); regA = (fxFltType) 0.0; break; } regA = (fxFltType) ((size_t)(regA+0.5) << (size_t)(regB+0.5)); break; case oRshift: if ((size_t) (regB+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "undefined shift", "%g", (double) regB); regA = (fxFltType) 0.0; break; } regA = (fxFltType) ((size_t)(regA+0.5) >> (size_t)(regB+0.5)); break; case oEq: regA = fabs((double) (regA-regB)) < MagickEpsilon ? 1.0 : 0.0; break; case oNotEq: regA = fabs((double) (regA-regB)) >= MagickEpsilon ? 1.0 : 0.0; break; case oLtEq: regA = (regA <= regB) ? 1.0 : 0.0; break; case oGtEq: regA = (regA >= regB) ? 1.0 : 0.0; break; case oLt: regA = (regA < regB) ? 1.0 : 0.0; break; case oGt: regA = (regA > regB) ? 1.0 : 0.0; break; case oLogAnd: regA = (regA<=0) ? 0.0 : (regB > 0) ? 1.0 : 0.0; break; case oLogOr: regA = (regA>0) ? 1.0 : (regB > 0.0) ? 1.0 : 0.0; break; case oLogNot: regA = (regA==0) ? 1.0 : 0.0; break; case oBitAnd: regA = (fxFltType) ((size_t)(regA+0.5) & (size_t)(regB+0.5)); break; case oBitOr: regA = (fxFltType) ((size_t)(regA+0.5) | (size_t)(regB+0.5)); break; case oBitNot: /* Old fx doesn't add 0.5. 
*/ regA = (fxFltType) (~(size_t)(regA+0.5)); break; case oPow: regA = pow ((double) regA, (double) regB); break; case oQuery: case oColon: break; case oOpenParen: case oCloseParen: case oOpenBracket: case oCloseBracket: case oOpenBrace: case oCloseBrace: break; case oAssign: pel->val = regA; break; case oNull: { if (pel->type == etColourConstant) { switch (channel) { default: case 0: regA = pel->val; break; case 1: regA = pel->val1; break; case 2: regA = pel->val2; break; } } else { regA = pel->val; } break; } case fAbs: regA = fabs ((double) regA); break; #if defined(MAGICKCORE_HAVE_ACOSH) case fAcosh: regA = acosh ((double) regA); break; #endif case fAcos: regA = acos ((double) regA); break; #if defined(MAGICKCORE_HAVE_J1) case fAiry: if (regA==0) regA = 1.0; else { fxFltType gamma = 2.0 * j1 ((MagickPI*regA)) / (MagickPI*regA); regA = gamma * gamma; } break; #endif case fAlt: regA = (fxFltType) (((ssize_t) regA) & 0x01 ? -1.0 : 1.0); break; #if defined(MAGICKCORE_HAVE_ASINH) case fAsinh: regA = asinh ((double) regA); break; #endif case fAsin: regA = asin ((double) regA); break; #if defined(MAGICKCORE_HAVE_ATANH) case fAtanh: regA = atanh ((double) regA); break; #endif case fAtan2: regA = atan2 ((double) regA, (double) regB); break; case fAtan: regA = atan ((double) regA); break; case fCeil: regA = ceil ((double) regA); break; case fChannel: switch (channel) { case 0: break; case 1: regA = regB; break; case 2: regA = regC; break; case 3: regA = regD; break; case 4: regA = regE; break; default: regA = 0.0; } break; case fClamp: if (regA < 0) regA = 0.0; else if (regA > 1.0) regA = 1.0; break; case fCosh: regA = cosh ((double) regA); break; case fCos: regA = cos ((double) regA); break; case fDebug: /* FIXME: debug() should give channel name. 
*/ (void) fprintf (stderr, "%s[%g,%g].[%i]: %s=%.*Lg\n", img->filename, (double) imgx, (double) imgy, channel, SetPtrShortExp (pfx, pel->pExpStart, (size_t) (pel->lenExp+1)), pfx->precision, regA); break; case fDrc: regA = regA / (regB*(regA-1.0) + 1.0); break; #if defined(MAGICKCORE_HAVE_ERF) case fErf: regA = erf ((double) regA); break; #endif case fExp: regA = exp ((double) regA); break; case fFloor: regA = floor ((double) regA); break; case fGauss: regA = exp((double) (-regA*regA/2.0))/sqrt(2.0*MagickPI); break; case fGcd: if (!IsNaN(regA)) regA = FxGcd (regA, regB, 0); break; case fHypot: regA = hypot ((double) regA, (double) regB); break; case fInt: regA = floor ((double) regA); break; case fIsnan: regA = (fxFltType) (!!IsNaN (regA)); break; #if defined(MAGICKCORE_HAVE_J0) case fJ0: regA = j0 ((double) regA); break; #endif #if defined(MAGICKCORE_HAVE_J1) case fJ1: regA = j1 ((double) regA); break; #endif #if defined(MAGICKCORE_HAVE_J1) case fJinc: if (regA==0) regA = 1.0; else regA = 2.0 * j1 ((MagickPI*regA))/(MagickPI*regA); break; #endif case fLn: regA = log ((double) regA); break; case fLogtwo: regA = log10((double) regA) / log10(2.0); break; case fLog: regA = log10 ((double) regA); break; case fMax: regA = (regA > regB) ? regA : regB; break; case fMin: regA = (regA < regB) ? regA : regB; break; case fMod: regA = regA - floor((double) (regA*PerceptibleReciprocal((double) regB)))*regB; break; case fNot: regA = (fxFltType) (regA < MagickEpsilon); break; case fPow: regA = pow ((double) regA, (double) regB); break; case fRand: { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ExecuteRPN) #endif regA = GetPseudoRandomValue (pfxrt->random_info); break; } case fRound: regA = floor ((double) regA + 0.5); break; case fSign: regA = (regA < 0) ? 
-1.0 : 1.0; break; case fSinc: regA = sin ((double) (MagickPI*regA)) / (MagickPI*regA); break; case fSinh: regA = sinh ((double) regA); break; case fSin: regA = sin ((double) regA); break; case fSqrt: regA = sqrt ((double) regA); break; case fSquish: regA = 1.0 / (1.0 + exp ((double) -regA)); break; case fTanh: regA = tanh ((double) regA); break; case fTan: regA = tan ((double) regA); break; case fTrunc: if (regA >= 0) regA = floor ((double) regA); else regA = ceil ((double) regA); break; case fDo: case fFor: case fIf: case fWhile: break; case fU: { /* Note: 1 value is available, index into image list. May have ImgAttr qualifier or channel qualifier or both. */ ssize_t ImgNum = ChkImgNum (pfx, regA); if (ImgNum < 0) break; regA = (fxFltType) 0; if (ImgNum == 0) { Image * pimg = pfx->Images[0]; int pech = (int)pel->ChannelQual; if (pel->ImgAttrQual == aNull) { if (pech < 0) { if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } else if (pech == HUE_CHANNEL || pech == SAT_CHANNEL || pech == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pech); break; } else if (pech == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, 0, (double) imgx, (double) imgy); break; } } else { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * 
pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } } else { /* we have an image atttribute */ regA = ImageStat (pfx, 0, WHICH_ATTR_CHAN, pel->ImgAttrQual); } } else { /* We have non-zero ImgNum. */ if (pel->ImgAttrQual == aNull) { const Quantum * pv; if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, (fxFltType) imgx, (fxFltType) imgy); break; } } pv = GetCacheViewVirtualPixels ( pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual); } } break; } case fU0: { /* No args. No image attribute. We may have a ChannelQual. If called from %[fx:...], ChannelQual will be CompositePixelChannel. 
*/ Image * pimg = pfx->Images[0]; int pech = (int)pel->ChannelQual; if (pech < 0) { if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU0 can't get cache", "%i", 0); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } else if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, 0, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, 0, (fxFltType) imgx, (fxFltType) imgy); } } else { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU0 can't get cache", "%i", 0); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } break; } case fUP: { /* 3 args are: ImgNum, x, y */ ssize_t ImgNum = ChkImgNum (pfx, regA); fxFltType fx, fy; if (ImgNum < 0) break; if (pel->IsRelative) { fx = imgx + regB; fy = imgy + regC; } else { fx = regB; fy = regC; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, fx, fy); break; } } { double v; Image * imUP = pfx->Images[ImgNum]; if (! 
InterpolatePixelChannel (imUP, pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN, imUP->interpolate, (double) fx, (double) fy, &v, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fUP can't get interpolate", "%lu", ImgNum); break; } regA = v * QuantumScale; } break; } case fS: case fV: { /* No args. */ ssize_t ImgNum = 1; if (pel->oprNum == fS) ImgNum = pfx->ImgNum; if (pel->ImgAttrQual == aNull) { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fV can't get cache", "%lu", ImgNum); break; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, (double) imgx, (double) imgy); break; } } regA = QuantumScale * pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual); } break; } case fP: case fSP: case fVP: { /* 2 args are: x, y */ fxFltType fx, fy; ssize_t ImgNum = pfx->ImgNum; if (pel->oprNum == fVP) ImgNum = 1; if (pel->IsRelative) { fx = imgx + regA; fy = imgy + regB; } else { fx = regA; fy = regB; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, fx, fy); } } { double v; if (! 
InterpolatePixelChannel (pfx->Images[ImgNum], pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN, pfx->Images[ImgNum]->interpolate, (double) fx, (double) fy, &v, pfx->exception) ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fSP or fVP can't get interp", "%lu", ImgNum); break; } regA = v * (fxFltType)QuantumScale; } break; } case fNull: break; case aDepth: regA = (fxFltType) GetImageDepth (img, pfx->exception); break; case aExtent: regA = (fxFltType) img->extent; break; case aKurtosis: regA = cs[WHICH_ATTR_CHAN].kurtosis; break; case aMaxima: regA = cs[WHICH_ATTR_CHAN].maxima; break; case aMean: regA = cs[WHICH_ATTR_CHAN].mean; break; case aMedian: regA = cs[WHICH_ATTR_CHAN].median; break; case aMinima: regA = cs[WHICH_ATTR_CHAN].minima; break; case aPage: break; case aPageX: regA = (fxFltType) img->page.x; break; case aPageY: regA = (fxFltType) img->page.y; break; case aPageWid: regA = (fxFltType) img->page.width; break; case aPageHt: regA = (fxFltType) img->page.height; break; case aPrintsize: break; case aPrintsizeX: regA = (fxFltType) PerceptibleReciprocal (img->resolution.x) * img->columns; break; case aPrintsizeY: regA = (fxFltType) PerceptibleReciprocal (img->resolution.y) * img->rows; break; case aQuality: regA = (fxFltType) img->quality; break; case aRes: break; case aResX: regA = (fxFltType) img->resolution.x; break; case aResY: regA = (fxFltType) img->resolution.y; break; case aSkewness: regA = cs[WHICH_ATTR_CHAN].skewness; break; case aStdDev: regA = cs[WHICH_ATTR_CHAN].standard_deviation; break; case aH: /* image->rows */ regA = (fxFltType) img->rows; break; case aN: /* image list length */ regA = (fxFltType) pfx->ImgListLen; break; case aT: /* image index in list */ regA = (fxFltType) pfx->ImgNum; break; case aW: /* image->columns */ regA = (fxFltType) img->columns; break; case aZ: /* image depth */ regA = (fxFltType) GetImageDepth (img, pfx->exception); break; case aNull: break; case sHue: /* of conversion to HSL */ regA 
= hue; break; case sIntensity: regA = GetIntensity (pfx, pfx->ImgNum, (double) imgx, (double) imgy); break; case sLightness: /* of conversion to HSL */ regA = lightness; break; case sLuma: /* calculation */ case sLuminance: /* as Luma */ regA = QuantumScale * (0.212656 * GetPixelRed (img,p) + 0.715158 * GetPixelGreen (img,p) + 0.072186 * GetPixelBlue (img,p)); break; case sSaturation: /* from conversion to HSL */ regA = saturation; break; case sA: /* alpha */ regA = QuantumScale * GetPixelAlpha (img, p); break; case sB: /* blue */ regA = QuantumScale * GetPixelBlue (img, p); break; case sC: /* red (ie cyan) */ regA = QuantumScale * GetPixelCyan (img, p); break; case sG: /* green */ regA = QuantumScale * GetPixelGreen (img, p); break; case sI: /* current x-coordinate */ regA = (fxFltType) imgx; break; case sJ: /* current y-coordinate */ regA = (fxFltType) imgy; break; case sK: /* black of CMYK */ regA = QuantumScale * GetPixelBlack (img, p); break; case sM: /* green (ie magenta) */ regA = QuantumScale * GetPixelGreen (img, p); break; case sO: /* alpha */ regA = QuantumScale * GetPixelAlpha (img, p); break; case sR: regA = QuantumScale * GetPixelRed (img, p); break; case sY: regA = QuantumScale * GetPixelYellow (img, p); break; case sNull: break; case rGoto: assert (pel->EleNdx >= 0); i = pel->EleNdx-1; /* -1 because 'for' loop will increment. 
*/ break; case rIfZeroGoto: assert (pel->EleNdx >= 0); if (fabs((double) regA) < MagickEpsilon) i = pel->EleNdx-1; break; case rIfNotZeroGoto: assert (pel->EleNdx >= 0); if (fabs((double) regA) > MagickEpsilon) i = pel->EleNdx-1; break; case rCopyFrom: assert (pel->EleNdx >= 0); regA = pfxrt->UserSymVals[pel->EleNdx]; break; case rCopyTo: assert (pel->EleNdx >= 0); pfxrt->UserSymVals[pel->EleNdx] = regA; break; case rZerStk: pfxrt->usedValStack = 0; break; case rNull: break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "pel->oprNum", "%i '%s' not yet implemented", (int)pel->oprNum, OprStr(pel->oprNum)); break; } if (i < 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bad run-time address", "%i", i); } if (pel->DoPush) if (!PushVal (pfx, pfxrt, regA, i)) break; } if (pfxrt->usedValStack > 0) regA = PopVal (pfx, pfxrt, 9999); *result = regA; if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs); if (pfx->exception->severity != UndefinedException) { return MagickFalse; } if (pfxrt->usedValStack != 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ValStack not empty", "(%i)", pfxrt->usedValStack); return MagickFalse; } return MagickTrue; } /* Following is substitute for FxEvaluateChannelExpression(). 
*/ MagickPrivate MagickBooleanType FxEvaluateChannelExpression ( FxInfo *pfx, const PixelChannel channel, const ssize_t x, const ssize_t y, double *result, ExceptionInfo *exception) { const int id = GetOpenMPThreadId(); fxFltType ret; assert (pfx != NULL); assert (pfx->image != NULL); assert (pfx->Images != NULL); assert (pfx->Imgs != NULL); assert (pfx->fxrts != NULL); pfx->fxrts[id].thisPixel = NULL; if (!ExecuteRPN (pfx, &pfx->fxrts[id], &ret, channel, x, y)) { (void) ThrowMagickException ( exception, GetMagickModule(), OptionError, "ExcuteRPN failed", " "); return MagickFalse; } *result = (double) ret; return MagickTrue; } static FxInfo *AcquireFxInfoPrivate (const Image * images, const char * expression, MagickBooleanType CalcAllStats, ExceptionInfo *exception) { char chLimit; FxInfo * pfx = (FxInfo*) AcquireCriticalMemory (sizeof (*pfx)); memset (pfx, 0, sizeof (*pfx)); if (!InitFx (pfx, images, CalcAllStats, exception)) { pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (!BuildRPN (pfx)) { (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (*expression == '@') pfx->expression = FileToString (expression+1, ~0UL, exception); else pfx->expression = ConstantString (expression); pfx->pex = (char *)pfx->expression; pfx->teDepth = 0; if (!TranslateStatementList (pfx, ";", &chLimit)) { (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (pfx->teDepth) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Translate expression depth", "(%i) not 0", pfx->teDepth); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (chLimit != '\0' && chLimit != ';') { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "AcquireFxInfo: 
TranslateExpression did not exhaust input", "(chLimit=%i) at'%s'", (int)chLimit, pfx->pex); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (pfx->NeedStats && pfx->runType == rtEntireImage && !pfx->statistics) { if (!CollectStatistics (pfx)) { (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } } if (pfx->DebugOpt) { DumpTables (stderr); DumpUserSymbols (pfx, stderr); (void) DumpRPN (pfx, stderr); } { size_t number_threads=(size_t) GetMagickResourceLimit(ThreadResource); ssize_t t; pfx->fxrts = (fxRtT *)AcquireQuantumMemory (number_threads, sizeof(fxRtT)); if (!pfx->fxrts) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "fxrts", "%lu", number_threads); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } for (t=0; t < (ssize_t) number_threads; t++) { if (!AllocFxRt (pfx, &pfx->fxrts[t])) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "AllocFxRt t=", "%g", (double) t); { ssize_t t2; for (t2 = t-1; t2 >= 0; t2--) { DestroyFxRt (&pfx->fxrts[t]); } } pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } } } return pfx; } FxInfo *AcquireFxInfo (const Image * images, const char * expression, ExceptionInfo *exception) { return AcquireFxInfoPrivate (images, expression, MagickFalse, exception); } FxInfo *DestroyFxInfo (FxInfo * pfx) { ssize_t t; assert (pfx != NULL); assert (pfx->image != NULL); assert (pfx->Images != NULL); assert 
(pfx->Imgs != NULL); assert (pfx->fxrts != NULL); for (t=0; t < (ssize_t) GetMagickResourceLimit(ThreadResource); t++) { DestroyFxRt (&pfx->fxrts[t]); } pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts); DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } /* Following is substitute for FxImage(). */ MagickExport Image *FxImage (const Image *image, const char *expression, ExceptionInfo *exception) { #define FxImageTag "FxNew/Image" CacheView *fx_view, *image_view; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; FxInfo *pfx; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (!fx_image) return NULL; if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_image=DestroyImage(fx_image); return NULL; } pfx = AcquireFxInfoPrivate (image, expression, MagickTrue, exception); if (!pfx) { fx_image=DestroyImage(fx_image); return NULL; } assert (pfx->image != NULL); assert (pfx->Images != NULL); assert (pfx->Imgs != NULL); assert (pfx->fxrts != NULL); status=MagickTrue; progress=0; image_view = AcquireVirtualCacheView (image, pfx->exception); fx_view = AcquireAuthenticCacheView (fx_image, pfx->exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows, \ pfx->ContainsDebug ? 
0 : 1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; fxFltType result = 0.0; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels (image_view, 0, y, image->columns, 1, pfx->exception); q = QueueCacheViewAuthenticPixels (fx_view, 0, y, fx_image->columns, 1, pfx->exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { ssize_t i; pfx->fxrts[id].thisPixel = (Quantum *)p; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel (image, i); PixelTrait traits = GetPixelChannelTraits (image, channel); PixelTrait fx_traits = GetPixelChannelTraits (fx_image, channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel (fx_image, channel, p[i], q); continue; } if (!ExecuteRPN (pfx, &pfx->fxrts[id], &result, channel, x, y)) { status=MagickFalse; break; } q[i] = ClampToQuantum ((MagickRealType) (QuantumRange*result)); } p+=GetPixelChannels (image); q+=GetPixelChannels (fx_image); } if (SyncCacheViewAuthenticPixels(fx_view, pfx->exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress (image, FxImageTag, progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view = DestroyCacheView (fx_view); image_view = DestroyCacheView (image_view); /* Before destroying the user symbol values, dump them to stderr. 
*/ if (pfx->DebugOpt && pfx->usedUserSymbols) { int t, i; char UserSym[MagickPathExtent]; fprintf (stderr, "User symbols (%i):\n", pfx->usedUserSymbols); for (t=0; t < (int) GetMagickResourceLimit(ThreadResource); t++) { for (i = 0; i < (int) pfx->usedUserSymbols; i++) { fprintf (stderr, "th=%i us=%i '%s': %.*Lg\n", t, i, NameOfUserSym (pfx, i, UserSym), pfx->precision, pfx->fxrts[t].UserSymVals[i]); } } } if (pfx->exception->severity != UndefinedException) { status = MagickFalse; } if (status == MagickFalse) fx_image = DestroyImage (fx_image); pfx = DestroyFxInfo (pfx); return(fx_image); }
fci_contract.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <assert.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "np_helper/np_helper.h" #include "fci.h" // for (16e,16o) ~ 11 MB buffer = 120 * 12870 * 8 #define STRB_BLKSIZE 112 /* * CPU timing of single thread can be estimated: * na*nb*nnorb*8(bytes)*5 / (mem_freq*64 (*2 if dual-channel mem)) * + na*nb*nnorb**2 (*2 for spin1, *1 for spin0) * / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) * where the 5 times memory accesses are 3 in prog_a_t1, prog0_b_t1, * spread_b_t1 and 2 in spread_a_t1 * * multi threads * na*nb*nnorb*8(bytes)*2 / (mem_freq*64 (*2 if dual-channel mem)) due to single thread * + na*nb*nnorb*8(bytes)*3 / max_mem_bandwidth due to N-thread * + na*nb*nnorb**2 (*2 for spin1, *1 for spin0) * / (CPU_freq (*4 for SSE3 blas, or *6-8 for AVX blas)) / num_threads */ /* *********************************************************** * * Need the permutation symmetry * h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k] * *********************************************************** */ /* * optimize for OpenMP, to reduce memory/CPU data transfer * add software prefetch, it's especially important for OpenMP */ /* * For given stra_id, spread alpah-strings (which can propagate to stra_id) * into t1[:nstrb,nnorb] * str1-of-alpha -> 
create/annihilate -> str0-of-alpha
 * ci0[:nstra,:nstrb] is contiguous in beta-strings
 * bcount control the number of beta strings to be calculated.
 * for spin=0 system, only lower triangle of the intermediate ci vector
 * needs to be calculated
 */
void FCIprog_a_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        /* Gather the contributions of every alpha string linked to stra_id
         * into t1.  Each compressed link entry packs an orbital-pair index
         * (ia), the linked string address and a sign; the table row is
         * terminated by a zero sign. */
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *ci_blk = ci0 + strb_id;
        int n, k;

        for (n = 0; n < nlinka; n++) {
                int ia = EXTRACT_IA(tab[n]);
                size_t addr = EXTRACT_ADDR(tab[n]);
                int sgn = EXTRACT_SIGN(tab[n]);
                double *dst;
                double *src;

                if (sgn == 0) {
                        /* end of the link entries for this string */
                        break;
                }
                dst = t1 + ia * bcount;
                src = ci_blk + addr * nstrb;
                if (sgn > 0) {
                        for (k = 0; k < bcount; k++) {
                                dst[k] += src[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                dst[k] -= src[k];
                        }
                }
        }
}

/*
 * For given stra_id, spread all beta-strings into t1[:nstrb,nnorb]
 * all str0-of-beta -> create/annihilate -> str1-of-beta
 * ci0[:nstra,:nstrb] is contiguous in beta-strings
 * bcount control the number of beta strings to be calculated.
 * for spin=0 system, only lower triangle of the intermediate ci vector
 * needs to be calculated
 */
void FCIprog_b_t1(double *ci0, double *t1,
                  int bcount, int stra_id, int strb_id,
                  int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        /* row of ci0 belonging to the fixed alpha string stra_id */
        double *pci = ci0 + stra_id*(size_t)nstrb;

        /* For each of the bcount beta strings, walk its link-table row and
         * accumulate signed CI amplitudes into t1[ia*bcount+str0]. */
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia = EXTRACT_IA (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                /* zero sign terminates the link entries */
                                break;
                        } else {
                                t1[ia*bcount+str0] += sign * pci[str1];
                        }
                }
                tab += nlinkb;
        }
}

/*
 * spread t1 into ci1
 * (alpha version: scatters each t1 column back to the CI rows that the
 * alpha link table of stra_id points to; inverse traversal of FCIprog_a_t1)
 */
void FCIspread_a_t1(double *ci1, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        ci1 += strb_id;
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                ia = EXTRACT_IA (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*bcount;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        /* zero sign terminates the link entries */
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/* Beta counterpart of FCIspread_a_t1: scatters t1 back into the ci1 row of
 * the fixed alpha string stra_id via the beta link table. */
void FCIspread_b_t1(double *ci1, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinkb, _LinkTrilT *clink_indexb)
{
        int j, ia, str0, str1, sign;
        const _LinkTrilT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci1 + stra_id * (size_t)nstrb;
        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        ia = EXTRACT_IA (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                break;
                        } else {
                                pci[str1] += sign * t1[ia*bcount+str0];
                        }
                }
                tab += nlinkb;
        }
}

/*
 * f1e_tril is the 1e hamiltonian for spin alpha
 * Contracts f1e_tril with ci0 over the alpha link table, accumulating into
 * ci1 (ci1 is NOT zeroed here; callers are expected to initialize it).
 */
void FCIcontract_a_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci0, *pci1;
        double tmp;
        _LinkTrilT *tab;
        /* NOTE(review): malloc result is not checked before use --
         * FCIcompress_link_tril would dereference NULL on OOM; confirm
         * whether callers guarantee modest sizes. */
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinka * nstra);
        FCIcompress_link_tril(clink, link_indexa, nstra, nlinka);

        for (str0 = 0; str0 < nstra; str0++) {
                tab = clink + str0 * nlinka;
                for (j = 0; j < nlinka; j++) {
                        ia = EXTRACT_IA (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci0 = ci0 + str0 * nstrb;
                        pci1 = ci1 + str1 * nstrb;
                        tmp = sign * f1e_tril[ia];
                        /* axpy over the contiguous beta dimension */
                        for (k = 0; k < nstrb; k++) {
                                pci1[k] += tmp * pci0[k];
                        }
                }
        }
        free(clink);
}

/*
 * f1e_tril is the 1e hamiltonian for spin beta
 * Beta analogue of FCIcontract_a_1e; the scatter runs within each alpha row.
 */
void FCIcontract_b_1e(double *f1e_tril, double *ci0, double *ci1,
                      int norb, int nstra, int nstrb, int nlinka, int nlinkb,
                      int *link_indexa, int *link_indexb)
{
        int j, k, ia, sign;
        size_t str0, str1;
        double *pci1;
        double tmp;
        _LinkTrilT *tab;
        /* NOTE(review): unchecked malloc, as in FCIcontract_a_1e. */
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlinkb * nstrb);
        FCIcompress_link_tril(clink, link_indexb, nstrb, nlinkb);

        for (str0 = 0; str0 < nstra; str0++) {
                pci1 = ci1 + str0 * nstrb;
                for (k = 0; k < nstrb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = ci0[str0*nstrb+k];
                        for (j = 0; j < nlinkb; j++) {
                                ia = EXTRACT_IA (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                pci1[str1] += sign * tmp * f1e_tril[ia];
                        }
                }
        }
        free(clink);
}

/* Spin-restricted (na == nb) 1e contraction: zeroes ci1 and applies the
 * alpha contraction with identical link tables for both spins. */
void FCIcontract_1e_spin0(double *f1e_tril, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        memset(ci1, 0, sizeof(double)*na*na);
        FCIcontract_a_1e(f1e_tril, ci0, ci1, norb, na, na, nlink, nlink,
                         link_index, link_index);
}

/*
 * spread t1 into ci1buf
 * Like FCIspread_a_t1, but t1 rows have stride nrow_t1 (the buffer row
 * length), which may differ from bcount, the number of columns written.
 */
static void spread_bufa_t1(double *ci1, double *t1, int nrow_t1,
                           int bcount, int stra_id, int strb_id,
                           int norb, int nstrb, int nlinka, _LinkTrilT *clink_indexa)
{
        int j, k, ia, sign;
        size_t str1;
        const _LinkTrilT *tab = clink_indexa + stra_id * nlinka;
        double *cp0, *cp1;
        for (j = 0; j < nlinka; j++) {
                ia = EXTRACT_IA (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                cp0 = t1 + ia*nrow_t1;
                cp1 = ci1 + str1*nstrb;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] += cp0[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                cp1[k] -= cp0[k];
                        }
                }
        }
}

/*
 * bcount_for_spread_a is different for spin1 and spin0
 * Core RHF 2e kernel for one (stra_id, beta-block) tile:
 *   t1  = gather(ci0)            (alpha + beta link tables)
 *   vt1 = t1 . eri               (dgemm over the nnorb pair index)
 *   scatter vt1 into ci1 (beta) and ci1buf (alpha)
 * t1buf must hold at least 2*nnorb*bcount doubles.
 */
static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount_for_spread_a, int ncol_ci1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        /* eri is stored over the lower-triangular orbital pair index */
        const int nnorb = norb * (norb+1)/2;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;

        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);

        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb,
               &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
        //FCIspread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0,
        //               norb, ncol_ci1buf, nlinka, clink_indexa);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount_for_spread_a, stra_id, 0,
                       norb, ncol_ci1buf, nlinka, clink_indexa);
}

/* out[i, 0:ni] += in[i, 0:ni] for i in 0..count, with distinct row strides
 * no (out) and ni (in).
 * NOTE(review): loop counters are int while count/ni are size_t -- fine for
 * typical FCI problem sizes but would truncate for extents > INT_MAX. */
void FCIaxpy2d(double *out, double *in, size_t count, size_t no, size_t ni)
{
        int i, j;
        for (i = 0; i < count; i++) {
                for (j = 0; j < ni; j++) {
                        out[i*no+j] += in[i*ni+j];
                }
        }
}

/*
 * nlink = nocc*nvir, num. all possible strings that a string can link to
 * link_index[str0] == linking map between str0 and other strings
 * link_index[str0][ith-linking-string] ==
 *     [tril(creation_op,annihilation_op),0,linking-string-id,sign]
 * FCIcontract_2e_spin0 only compute half of the contraction, due to the
 * symmetry between alpha and beta spin.
The right contracted ci vector
 * is (ci1+ci1.T)
 */
void FCIcontract_2e_spin0(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nlink, int *link_index)
{
        // Spin-restricted 2e contraction.  Each thread accumulates into a
        // private ci1buf; per beta-block the buffers are summed across threads
        // (NPomp_dsum_reduce_inplace) and the master folds them into ci1.
        _LinkTrilT *clink = malloc(sizeof(_LinkTrilT) * nlink * na);
        FCIcompress_link_tril(clink, link_index, na, nlink);

        memset(ci1, 0, sizeof(double)*na*na);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < na; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, na-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static, 112)
/* strk starts from MAX(strk0, ib), because [0:ib,0:ib] have been evaluated */
                for (strk = ib; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       MIN(STRB_BLKSIZE, strk-ib), blen,
                                       MIN(STRB_BLKSIZE, strk+1-ib),
                                       strk, ib, norb, na, na, nlink, nlink,
                                       clink, clink);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, na, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2e_kern
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clink);
}

/* Spin-unrestricted-dimension variant (na x nb ci vector, separate alpha and
 * beta link tables); same blocked reduce-then-accumulate scheme as spin0,
 * but every alpha string is contracted (no triangle shortcut). */
void FCIcontract_2e_spin1(double *eri, double *ci0, double *ci1,
                          int norb, int na, int nb,
                          int nlinka, int nlinkb,
                          int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf,
                                       blen, blen, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2e_kern
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}

/*
 * eri_ab is mixed integrals (alpha,alpha|beta,beta), |beta,beta) in small strides
 */
static void ctr_uhf2e_kern(double *eri_aa, double *eri_ab, double *eri_bb,
                           double *ci0, double *ci1,
                           double *ci1buf, double *t1buf,
                           int bcount, int stra_id, int strb_id,
                           int norb, int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        // UHF block kernel: separate alpha (t1a) and beta (t1b) intermediates,
        // four dgemm calls combine them with the aa/ab/bb integral blocks.
        // t1buf must hold 3*nnorb*bcount doubles (t1a, t1b, vt1).
        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1)/2;
        double *t1a = t1buf;
        double *t1b = t1a + nnorb*bcount;
        double *vt1 = t1b + nnorb*bcount;
        memset(t1a, 0, sizeof(double)*nnorb*bcount);
        memset(t1b, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1a, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        FCIprog_b_t1(ci0, t1b, bcount, stra_id, strb_id,
                     norb, nb, nlinkb, clink_indexb);

        // beta side: vt1 = t1a . eri_ab^T + t1b . eri_bb, scattered via beta links
        dgemm_(&TRANS_N, &TRANS_T, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_ab, &nnorb, &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_bb, &nnorb, &D1, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);

        // alpha side: vt1 = t1a . eri_aa + t1b . eri_ab, scattered via alpha links
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1a, &bcount, eri_aa, &nnorb, &D0, vt1, &bcount);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1b, &bcount, eri_ab, &nnorb, &D1, vt1, &bcount);
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}

/* UHF 2e contraction driver; blocking / per-thread buffering identical to
 * FCIcontract_2e_spin1, with a 2x larger t1buf for the t1a/t1b pair. */
void FCIcontract_uhf2e(double *eri_aa, double *eri_ab, double *eri_bb,
                       double *ci0, double *ci1,
                       int norb, int na, int nb,
                       int nlinka, int nlinkb,
                       int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
        memset(ci1, 0, sizeof(double)*na*nb);
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_uhf2e_kern(eri_aa, eri_ab, eri_bb, ci0, ci1,
                                       ci1buf, t1buf, blen, strk, ib,
                                       norb, na, nb, nlinka, nlinkb,
                                       clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_uhf2e_kern
#pragma omp barrier
        }
        free(t1buf);
        free(ci1buf);
}
        free(clinka);
        free(clinkb);
}

/*************************************************
 * hdiag
 *************************************************/
/* Hamiltonian diagonal hdiag[ia,ib] = sum_j h1(jj) + .5 * sum_jk (J - K)
 * over the occupied orbitals listed for each (alpha, beta) string pair. */
void FCImake_hdiag_uhf(double *hdiag, double *h1e_a, double *h1e_b,
                       double *jdiag_aa, double *jdiag_ab, double *jdiag_bb,
                       double *kdiag_aa, double *kdiag_bb,
                       int norb, int nstra, int nstrb, int nocca, int noccb,
                       int *occslista, int *occslistb)
{
#pragma omp parallel
{
        int j, j0, k0, jk, jk0;
        size_t ia, ib;
        double e1, e2;
        int *paocc, *pbocc;
#pragma omp for schedule(static)
        for (ia = 0; ia < nstra; ia++) {
                paocc = occslista + ia * nocca;
                for (ib = 0; ib < nstrb; ib++) {
                        e1 = 0;
                        e2 = 0;
                        pbocc = occslistb + ib * noccb;
                        for (j0 = 0; j0 < nocca; j0++) {
                                j = paocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_a[j*norb+j];
                                for (k0 = 0; k0 < nocca; k0++) { // (alpha|alpha)
                                        jk = jk0 + paocc[k0];
                                        e2 += jdiag_aa[jk] - kdiag_aa[jk];
                                }
                                for (k0 = 0; k0 < noccb; k0++) { // (alpha|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_ab[jk] * 2;
                                }
                        }
                        for (j0 = 0; j0 < noccb; j0++) {
                                j = pbocc[j0];
                                jk0 = j * norb;
                                e1 += h1e_b[j*norb+j];
                                for (k0 = 0; k0 < noccb; k0++) { // (beta|beta)
                                        jk = jk0 + pbocc[k0];
                                        e2 += jdiag_bb[jk] - kdiag_bb[jk];
                                }
                        }
                        hdiag[ia*nstrb+ib] = e1 + e2 * .5;
                }
        }
}
}

/* Restricted wrapper: identical alpha/beta integrals and occupation lists. */
void FCImake_hdiag(double *hdiag, double *h1e, double *jdiag, double *kdiag,
                   int norb, int na, int nocc, int *occslst)
{
        FCImake_hdiag_uhf(hdiag, h1e, h1e, jdiag, jdiag, jdiag, kdiag, kdiag,
                          norb, na, na, nocc, nocc, occslst, occslst);
}

/* Index of the lowest set bit of r (undefined result for r == 0, as with
 * ffsll-1); the fallback is a branchy binary search over the 64 bits. */
static int first1(uint64_t r)
{
#ifdef HAVE_FFS
        return ffsll(r) - 1;
#else
        int n = 0;
        if (r >> (n + 32)) n += 32;
        if (r >> (n + 16)) n += 16;
        if (r >> (n + 8))  n += 8;
        if (r >> (n + 4))  n += 4;
        if (r >> (n + 2))  n += 2;
        if (r >> (n + 1))  n += 1;
        return n;
#endif
}

/*************************************************
 * pspace Hamiltonian, ref CPL, 169, 463
 *************************************************/
/*
 * sub-space Hamiltonian (tril part) of the determinants (stra,strb)
 */
void FCIpspace_h0tril_uhf(double *h0, double *h1e_a, double *h1e_b,
                          double *g2e_aa, double *g2e_ab, double *g2e_bb,
                          uint64_t *stra, uint64_t *strb, int norb, int np)
{
        // Slater-Condon rules on bit-string determinants: the pair (n1da,n1db)
        // of differing-bit counts selects single/double excitation cases;
        // pairs differing in more than two spin-orbitals contribute zero and
        // fall through the switch untouched.  Only the lower triangle
        // h0[i*np+j], j < i, is written.
        const int d2 = norb * norb;
        const int d3 = norb * norb * norb;
#pragma omp parallel
{
        int i, j, k, pi, pj, pk, pl;
        int n1da, n1db;
        uint64_t da, db, str1;
        double tmp;
#pragma omp for schedule(dynamic)
        for (i = 0; i < np; i++) {
        for (j = 0; j < i; j++) {
                da = stra[i] ^ stra[j];
                db = strb[i] ^ strb[j];
                n1da = FCIpopcount_1(da);
                n1db = FCIpopcount_1(db);
                switch (n1da) {
                case 0: switch (n1db) {
                        case 2: // beta single excitation
                        pi = first1(db & strb[i]);
                        pj = first1(db & strb[j]);
                        tmp = h1e_b[pi*norb+pj];
                        for (k = 0; k < norb; k++) {
                                if (stra[i] & (1ULL<<k)) {
                                        tmp += g2e_ab[pi*norb+pj+k*d3+k*d2];
                                }
                                if (strb[i] & (1ULL<<k)) {
                                        tmp += g2e_bb[pi*d3+pj*d2+k*norb+k]
                                             - g2e_bb[pi*d3+k*d2+k*norb+pj];
                                }
                        }
                        if (FCIcre_des_sign(pi, pj, strb[j]) > 0) {
                                h0[i*np+j] = tmp;
                        } else {
                                h0[i*np+j] = -tmp;
                        }
                        break;
                        case 4: // beta double excitation
                        pi = first1(db & strb[i]);
                        pj = first1(db & strb[j]);
                        pk = first1((db & strb[i]) ^ (1ULL<<pi));
                        pl = first1((db & strb[j]) ^ (1ULL<<pj));
                        str1 = strb[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                        if (FCIcre_des_sign(pi, pj, strb[j])
                           *FCIcre_des_sign(pk, pl, str1) > 0) {
                                h0[i*np+j] = g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                           - g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                        } else {
                                h0[i*np+j] =-g2e_bb[pi*d3+pj*d2+pk*norb+pl]
                                           + g2e_bb[pi*d3+pl*d2+pk*norb+pj];
                        } }
                        break;
                case 2: switch (n1db) {
                        case 0: // alpha single excitation
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        tmp = h1e_a[pi*norb+pj];
                        for (k = 0; k < norb; k++) {
                                if (strb[i] & (1ULL<<k)) {
                                        tmp += g2e_ab[pi*d3+pj*d2+k*norb+k];
                                }
                                if (stra[i] & (1ULL<<k)) {
                                        tmp += g2e_aa[pi*d3+pj*d2+k*norb+k]
                                             - g2e_aa[pi*d3+k*d2+k*norb+pj];
                                }
                        }
                        if (FCIcre_des_sign(pi, pj, stra[j]) > 0) {
                                h0[i*np+j] = tmp;
                        } else {
                                h0[i*np+j] = -tmp;
                        }
                        break;
                        case 2: // one alpha and one beta single excitation
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        pk = first1(db & strb[i]);
                        pl = first1(db & strb[j]);
                        if (FCIcre_des_sign(pi, pj, stra[j])
                           *FCIcre_des_sign(pk, pl, strb[j]) > 0) {
                                h0[i*np+j] = g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                        } else {
                                h0[i*np+j] =-g2e_ab[pi*d3+pj*d2+pk*norb+pl];
                        } }
                        break;
                case 4: switch (n1db) {
                        case 0: // alpha double excitation
                        pi = first1(da & stra[i]);
                        pj = first1(da & stra[j]);
                        pk = first1((da & stra[i]) ^ (1ULL<<pi));
                        pl = first1((da & stra[j]) ^ (1ULL<<pj));
                        str1 = stra[j] ^ (1ULL<<pi) ^ (1ULL<<pj);
                        if (FCIcre_des_sign(pi, pj, stra[j])
                           *FCIcre_des_sign(pk, pl, str1) > 0) {
                                h0[i*np+j] = g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                           - g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                        } else {
                                h0[i*np+j] =-g2e_aa[pi*d3+pj*d2+pk*norb+pl]
                                           + g2e_aa[pi*d3+pl*d2+pk*norb+pj];
                        } }
                        break;
                }
        } }
}
}

/* Restricted wrapper over FCIpspace_h0tril_uhf. */
void FCIpspace_h0tril(double *h0, double *h1e, double *g2e,
                      uint64_t *stra, uint64_t *strb, int norb, int np)
{
        FCIpspace_h0tril_uhf(h0, h1e, h1e, g2e, g2e, g2e,
                             stra, strb, norb, np);
}

/***********************************************************************
 *
 * With symmetry
 *
 * Note the ordering in eri and the index in link_index
 * eri is a tril matrix, it should be reordered wrt the irrep of the
 * direct product E_i^j. The 2D array eri(ij,kl) is a diagonal block
 * matrix. Each block is associated with an irrep.
 * link_index[str_id,pair_id,0] which is the index of pair_id, should be
 * reordered wrt the irreps accordingly
 *
 * dimirrep stores the number of occurence for each irrep
 *
 ***********************************************************************/
/* Copy the link entries whose irrep tag (field [k*4+1]) equals eri_irrep
 * into the compressed table; a trailing sign == 0 entry terminates rows
 * that end up shorter than nlink. */
static void pick_link_by_irrep(_LinkTrilT *clink, int *link_index,
                               int nstr, int nlink, int eri_irrep)
{
        int i, j, k;
        for (i = 0; i < nstr; i++) {
                for (k = 0, j = 0; k < nlink; k++) {
                        if (link_index[k*4+1] == eri_irrep) {
                                clink[j].ia = link_index[k*4+0];
                                clink[j].addr = link_index[k*4+2];
                                clink[j].sign = link_index[k*4+3];
                                j++;
                        }
                }
                if (j < nlink) {
                        clink[j].sign = 0;   // end-of-list sentinel
                }
                clink += nlink;
                link_index += nlink * 4;
        }
}

/* Symmetry-adapted variant of ctr_rhf2e_kern for one irrep block: only the
 * alpha gather feeds t1 (nnorb here is the irrep block dimension, not
 * norb*(norb+1)/2; norb argument of the helpers is unused and passed as 0). */
static void ctr_rhf2esym_kern1(double *eri, double *ci0, double *ci1ab,
                               double *ci1buf, double *t1buf,
                               int ncol_ci1buf,
                               int bcount, int stra_id, int strb_id,
                               int nnorb, int nb_intermediate,
                               int na, int nb, int nlinka, int nlinkb,
                               _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     0, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb,
               &D0, vt1, &bcount);
        FCIspread_b_t1(ci1ab, vt1, bcount, stra_id, strb_id,
                       0, nb_intermediate, nlinkb, clink_indexb);
        spread_bufa_t1(ci1buf, vt1, bcount, bcount, stra_id, 0,
                       0, ncol_ci1buf, nlinka, clink_indexa);
}

/* Blocked, threaded loop over one symmetry sector; same per-thread ci1buf +
 * NPomp_dsum_reduce_inplace + master-accumulate scheme as the non-symmetry
 * contraction drivers. */
static void loop_c2e_symm1(double *eri, double *ci0, double *ci1aa, double *ci1ab,
                           int nnorb, int na_intermediate, int nb_intermediate,
                           int na, int nb, int nlinka, int nlinkb,
                           _LinkTrilT *clinka, _LinkTrilT *clinkb)
{
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib;
        size_t blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*nnorb*2+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < na_intermediate; strk++) {
                        ctr_rhf2esym_kern1(eri, ci0, ci1ab, ci1buf, t1buf,
                                           blen, blen, strk, ib,
                                           nnorb, nb_intermediate,
                                           na, nb, nlinka, nlinkb,
                                           clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1aa+ib, ci1buf, na, nb, blen);
// An explicit barrier to ensure ci1 is updated. Without barrier, there may
// occur race condition between FCIaxpy2d and ctr_rhf2esym_kern1
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
}

#define TOTIRREPS 8
/* Point-group-symmetry-adapted 2e contraction: loop over alpha-string irreps
 * and integral-block irreps, re-filter the link tables for each (irrep pair)
 * via pick_link_by_irrep, and contract the corresponding sector. */
void FCIcontract_2e_symm1(double **eris, double **ci0, double **ci1,
                          int norb, int *nas, int *nbs,
                          int nlinka, int nlinkb,
                          int **linka, int **linkb,
                          int *dimirrep, int wfnsym)
{
        int i;
        int na = 0;
        int nb = 0;
        // scratch tables are sized for the largest irrep sector
        for (i = 0; i < TOTIRREPS; i++) {
                na = MAX(nas[i], na);
                nb = MAX(nbs[i], nb);
        }
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        int ai_ir, stra_ir, strb_ir, intera_ir, interb_ir, ma, mb;
        for (stra_ir = 0; stra_ir < TOTIRREPS; stra_ir++) {
        for (ai_ir = 0; ai_ir < TOTIRREPS; ai_ir++) {
                strb_ir = wfnsym^stra_ir;   // beta irrep fixed by wavefunction symmetry
                ma = nas[stra_ir];
                mb = nbs[strb_ir];
                if (ma > 0 && mb > 0 && dimirrep[ai_ir] > 0) {
                        intera_ir = ai_ir^stra_ir;
                        interb_ir = ai_ir^strb_ir;
                        // clinka for inter_ir*ai_ir -> stra_ir
                        pick_link_by_irrep(clinka, linka[intera_ir],
                                           nas[intera_ir], nlinka, ai_ir);
                        // clinka for strb_ir*ai_ir -> inter_ir
                        pick_link_by_irrep(clinkb, linkb[strb_ir],
                                           nbs[strb_ir], nlinkb, ai_ir);
                        loop_c2e_symm1(eris[ai_ir], ci0[stra_ir],
                                       ci1[stra_ir], ci1[intera_ir],
                                       dimirrep[ai_ir], nas[intera_ir],
                                       nbs[interb_ir], ma, mb,
                                       nlinka, nlinkb, clinka, clinkb);
                }
        } }
        free(clinka);
        free(clinkb);
}
DRB028-privatemissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
tmp should be annotated as private to avoid race condition.
Data race pairs: tmp@65:5 vs. tmp@66:12
                 tmp@65:5 vs. tmp@65:5
*/

#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  int i;
  int tmp;       // shared across all loop iterations -- the intentional defect
  int len=100;
  int a[100];

  for (i=0;i<len;i++)
    a[i]=i;

  // DataRaceBench "yes" case: tmp is deliberately left shared (it should be
  // private(tmp), or declared inside the loop body).  Concurrent iterations
  // race on the write/read of tmp.  Do NOT "fix" this -- race detectors are
  // expected to report it.
#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    tmp =a[i]+i;
    a[i] = tmp;
  }
  printf("a[50]=%d\n", a[50]);
  return 0;
}
requires.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu -allow-empty -check-prefix=DEBUG
// REQUIRES: libomptarget-debug

/*
  Test for the 'requires' clause check.
  When a target region is used, the requires flags are set in the
  runtime for the entire compilation unit. If the flags are set again,
  (for whatever reason) the set must be consistent with previously
  set values.
*/

#include <stdio.h>
#include <omp.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

extern void __tgt_register_requires(int64_t);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

void run_reg_requires() {
  // Before the target region is registered, the requires registers the status
  // of the requires clauses. Since there are no requires clauses in this file
  // the flags state can only be OMP_REQ_NONE i.e. 1.

  // This is the 2nd time this function is called so it should print the debug
  // info belonging to the check.
  __tgt_register_requires(1);
  __tgt_register_requires(1);  // second identical registration must be accepted

  // DEBUG: New requires flags 1 compatible with existing 1!
}

// ---------------------------------------------------------------------------
int main() {
  run_reg_requires(); // This also runs reg requires for the first time.

  // An (empty) target region is still needed so the runtime's registration
  // path that emits the DEBUG line above is actually exercised.
#pragma omp target
  {}

  return 0;
}
HybridRepCenterOrbitals.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2019 QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // ////////////////////////////////////////////////////////////////////////////////////// /** @file HybridRepCenterOrbitals.h * * Hybrid representation atomic centered orbitals */ #ifndef QMCPLUSPLUS_HYBRIDREP_CENTER_ORBITALS_H #define QMCPLUSPLUS_HYBRIDREP_CENTER_ORBITALS_H #include <Particle/DistanceTableData.h> #include <QMCWaveFunctions/LCAO/SoaSphericalTensor.h> #include <spline2/MultiBspline1D.hpp> #include <Numerics/SmoothFunctions.hpp> namespace qmcplusplus { template<typename ST> class AtomicOrbitals { public: static const int D = 3; using AtomicSplineType = typename bspline_traits<ST, 1>::SplineType; using AtomicBCType = typename bspline_traits<ST, 1>::BCType; using AtomicSingleSplineType = UBspline_1d_d; using PointType = TinyVector<ST, D>; using value_type = ST; using vContainer_type = aligned_vector<ST>; private: // near core cutoff ST rmin; // far from core cutoff, rmin_sqrt>=rmin ST rmin_sqrt; ST cutoff, cutoff_buffer, spline_radius, non_overlapping_radius; int spline_npoints, BaseN; int NumBands, Npad; PointType center_pos; const int lmax, lm_tot; SoaSphericalTensor<ST> Ylm; vContainer_type l_vals; vContainer_type r_power_minus_l; ///1D spline of radial functions of all the orbitals std::shared_ptr<MultiBspline1D<ST>> SplineInst; vContainer_type localV, localG, localL; public: AtomicOrbitals(int Lmax) : lmax(Lmax), lm_tot((Lmax + 1) * (Lmax + 1)), Ylm(Lmax) { r_power_minus_l.resize(lm_tot); l_vals.resize(lm_tot); for (int l = 0; l <= lmax; l++) for (int m = -l; m <= l; m++) l_vals[l * (l + 1) + m] = l; rmin = 
std::exp(std::log(std::numeric_limits<ST>::min()) / std::max(Lmax, 1)); rmin = std::max(rmin, std::numeric_limits<ST>::epsilon()); rmin_sqrt = std::max(rmin, std::sqrt(std::numeric_limits<ST>::epsilon())); } // accessing functions, const only ST getCutoff() const { return cutoff; } ST getCutoffBuffer() const { return cutoff_buffer; } ST getSplineRadius() const { return spline_radius; } ST getNonOverlappingRadius() const { return non_overlapping_radius; } int getSplineNpoints() const { return spline_npoints; } int getLmax() const { return lmax; } const PointType& getCenterPos() const { return center_pos; } inline void resizeStorage(size_t Nb) { NumBands = Nb; Npad = getAlignedSize<ST>(Nb); localV.resize(Npad * lm_tot); localG.resize(Npad * lm_tot); localL.resize(Npad * lm_tot); create_spline(); } void bcast_tables(Communicate* comm) { chunked_bcast(comm, SplineInst->getSplinePtr()); } void gather_tables(Communicate* comm, std::vector<int>& offset) { gatherv(comm, SplineInst->getSplinePtr(), Npad, offset); } template<typename PT, typename VT> inline void set_info(const PT& R, const VT& cutoff_in, const VT& cutoff_buffer_in, const VT& spline_radius_in, const VT& non_overlapping_radius_in, const int spline_npoints_in) { center_pos[0] = R[0]; center_pos[1] = R[1]; center_pos[2] = R[2]; cutoff = cutoff_in; cutoff_buffer = cutoff_buffer_in; spline_radius = spline_radius_in; spline_npoints = spline_npoints_in; non_overlapping_radius = non_overlapping_radius_in; BaseN = spline_npoints + 2; } inline void create_spline() { AtomicBCType bc; bc.lCode = FLAT; bc.rCode = NATURAL; Ugrid grid; grid.start = 0.0; grid.end = spline_radius; grid.num = spline_npoints; SplineInst = std::make_shared<MultiBspline1D<ST>>(); SplineInst->create(grid, bc, lm_tot * Npad); } inline size_t getSplineSizeInBytes() const { return SplineInst->sizeInByte(); } inline void flush_zero() { SplineInst->flush_zero(); } inline void set_spline(AtomicSingleSplineType* spline, int lm, int ispline) { 
SplineInst->copy_spline(spline, lm * Npad + ispline, 0, BaseN); } bool read_splines(hdf_archive& h5f) { einspline_engine<AtomicSplineType> bigtable(SplineInst->getSplinePtr()); int lmax_in, spline_npoints_in; ST spline_radius_in; bool success = true; success = success && h5f.readEntry(lmax_in, "l_max"); success = success && h5f.readEntry(spline_radius_in, "spline_radius"); success = success && h5f.readEntry(spline_npoints_in, "spline_npoints"); if (lmax_in != lmax) return false; if (spline_radius_in != spline_radius) return false; if (spline_npoints_in != spline_npoints) return false; return success && h5f.readEntry(bigtable, "radial_spline"); } bool write_splines(hdf_archive& h5f) { bool success = true; success = success && h5f.writeEntry(spline_radius, "spline_radius"); success = success && h5f.writeEntry(spline_npoints, "spline_npoints"); success = success && h5f.writeEntry(lmax, "l_max"); success = success && h5f.writeEntry(center_pos, "position"); einspline_engine<AtomicSplineType> bigtable(SplineInst->getSplinePtr()); success = success && h5f.writeEntry(bigtable, "radial_spline"); return success; } //evaluate only V template<typename VV> inline void evaluate_v(const ST& r, const PointType& dr, VV& myV) { if (r > std::numeric_limits<ST>::epsilon()) Ylm.evaluateV(dr[0] / r, dr[1] / r, dr[2] / r); else Ylm.evaluateV(0, 0, 1); const ST* restrict Ylm_v = Ylm[0]; constexpr ST czero(0); ST* restrict val = myV.data(); ST* restrict local_val = localV.data(); std::fill(myV.begin(), myV.end(), czero); SplineInst->evaluate(r, localV); for (size_t lm = 0; lm < lm_tot; lm++) { #pragma omp simd aligned(val, local_val) for (size_t ib = 0; ib < myV.size(); ib++) val[ib] += Ylm_v[lm] * local_val[ib]; local_val += Npad; } } template<typename DISPL, typename VM> inline void evaluateValues(const DISPL& Displacements, const int center_idx, const ST& r, VM& multi_myV) { if (r <= std::numeric_limits<ST>::epsilon()) Ylm.evaluateV(0, 0, 1); const ST* restrict Ylm_v = Ylm[0]; const 
size_t m = multi_myV.cols(); constexpr ST czero(0); std::fill(multi_myV.begin(), multi_myV.end(), czero); SplineInst->evaluate(r, localV); for (int ivp = 0; ivp < Displacements.size(); ivp++) { PointType dr = Displacements[ivp][center_idx]; if (r > std::numeric_limits<ST>::epsilon()) Ylm.evaluateV(-dr[0] / r, -dr[1] / r, -dr[2] / r); ST* restrict val = multi_myV[ivp]; ST* restrict local_val = localV.data(); for (size_t lm = 0; lm < lm_tot; lm++) { #pragma omp simd aligned(val, local_val) for (size_t ib = 0; ib < m; ib++) val[ib] += Ylm_v[lm] * local_val[ib]; local_val += Npad; } } } //evaluate VGL template<typename VV, typename GV> inline void evaluate_vgl(const ST& r, const PointType& dr, VV& myV, GV& myG, VV& myL) { ST drx, dry, drz, rhatx, rhaty, rhatz, rinv; if (r > rmin) { rinv = 1.0 / r; } else { rinv = 0; } drx = dr[0]; dry = dr[1]; drz = dr[2]; rhatx = drx * rinv; rhaty = dry * rinv; rhatz = drz * rinv; Ylm.evaluateVGL(drx, dry, drz); const ST* restrict Ylm_v = Ylm[0]; const ST* restrict Ylm_gx = Ylm[1]; const ST* restrict Ylm_gy = Ylm[2]; const ST* restrict Ylm_gz = Ylm[3]; ST* restrict g0 = myG.data(0); ST* restrict g1 = myG.data(1); ST* restrict g2 = myG.data(2); constexpr ST czero(0), cone(1), chalf(0.5); std::fill(myV.begin(), myV.end(), czero); std::fill(g0, g0 + Npad, czero); std::fill(g1, g1 + Npad, czero); std::fill(g2, g2 + Npad, czero); std::fill(myL.begin(), myL.end(), czero); ST* restrict val = myV.data(); ST* restrict lapl = myL.data(); ST* restrict local_val = localV.data(); ST* restrict local_grad = localG.data(); ST* restrict local_lapl = localL.data(); SplineInst->evaluate_vgl(r, localV, localG, localL); if (r > rmin_sqrt) { // far from core r_power_minus_l[0] = cone; ST r_power_temp = cone; for (int l = 1; l <= lmax; l++) { r_power_temp *= rinv; for (int m = -l, lm = l * l; m <= l; m++, lm++) r_power_minus_l[lm] = r_power_temp; } for (size_t lm = 0; lm < lm_tot; lm++) { const ST& l_val = l_vals[lm]; const ST& r_power = 
r_power_minus_l[lm]; const ST Ylm_rescale = Ylm_v[lm] * r_power; const ST rhat_dot_G = (rhatx * Ylm_gx[lm] + rhaty * Ylm_gy[lm] + rhatz * Ylm_gz[lm]) * r_power; #pragma omp simd aligned(val, g0, g1, g2, lapl, local_val, local_grad, local_lapl) for (size_t ib = 0; ib < myV.size(); ib++) { const ST local_v = local_val[ib]; const ST local_g = local_grad[ib]; const ST local_l = local_lapl[ib]; // value const ST Vpart = l_val * rinv * local_v; val[ib] += Ylm_rescale * local_v; // grad const ST factor1 = local_g * Ylm_rescale; const ST factor2 = local_v * r_power; const ST factor3 = -Vpart * Ylm_rescale; g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx; g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty; g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz; // laplacian lapl[ib] += (local_l + (local_g * (2 - l_val) - Vpart) * rinv) * Ylm_rescale + (local_g - Vpart) * rhat_dot_G; } local_val += Npad; local_grad += Npad; local_lapl += Npad; } } else if (r > rmin) { // the possibility of reaching here is very very low std::cout << "Warning: an electron is very close to an ion, distance=" << r << " be careful!" 
<< std::endl; // near core, kill divergence in the laplacian r_power_minus_l[0] = cone; ST r_power_temp = cone; for (int l = 1; l <= lmax; l++) { r_power_temp *= rinv; for (int m = -l, lm = l * l; m <= l; m++, lm++) r_power_minus_l[lm] = r_power_temp; } for (size_t lm = 0; lm < lm_tot; lm++) { const ST& l_val = l_vals[lm]; const ST& r_power = r_power_minus_l[lm]; const ST Ylm_rescale = Ylm_v[lm] * r_power; const ST rhat_dot_G = (Ylm_gx[lm] * rhatx + Ylm_gy[lm] * rhaty + Ylm_gz[lm] * rhatz) * r_power * r; #pragma omp simd aligned(val, g0, g1, g2, lapl, local_val, local_grad, local_lapl) for (size_t ib = 0; ib < myV.size(); ib++) { const ST local_v = local_val[ib]; const ST local_g = local_grad[ib]; const ST local_l = local_lapl[ib]; // value const ST Vpart = Ylm_rescale * local_v; val[ib] += Vpart; // grad const ST factor1 = local_g * Ylm_rescale; const ST factor2 = local_v * r_power; const ST factor3 = -l_val * Vpart * rinv; g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx; g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty; g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz; // laplacian lapl[ib] += local_l * (cone - chalf * l_val) * (3 * Ylm_rescale + rhat_dot_G); } local_val += Npad; local_grad += Npad; local_lapl += Npad; } } else { std::cout << "Warning: an electron is on top of an ion!" 
<< std::endl; // strictly zero #pragma omp simd aligned(val, lapl, local_val, local_lapl) for (size_t ib = 0; ib < myV.size(); ib++) { // value val[ib] = Ylm_v[0] * local_val[ib]; // laplacian lapl[ib] = local_lapl[ib] * static_cast<ST>(3) * Ylm_v[0]; } local_val += Npad; local_grad += Npad; local_lapl += Npad; if (lm_tot > 0) { //std::cout << std::endl; for (size_t lm = 1; lm < 4; lm++) { #pragma omp simd aligned(g0, g1, g2, local_grad) for (size_t ib = 0; ib < myV.size(); ib++) { const ST local_g = local_grad[ib]; // grad g0[ib] += local_g * Ylm_gx[lm]; g1[ib] += local_g * Ylm_gy[lm]; g2[ib] += local_g * Ylm_gz[lm]; } local_grad += Npad; } } } } template<typename VV, typename GV, typename HT> void evaluate_vgh(const ST& r, const PointType& dr, VV& myV, GV& myG, HT& myH) { //Needed to do tensor product here APP_ABORT("AtomicOrbitals::evaluate_vgh"); } }; template<typename ST> class HybridRepCenterOrbitals { public: static const int D = 3; using PointType = typename AtomicOrbitals<ST>::PointType; using RealType = typename DistanceTableData::RealType; using PosType = typename DistanceTableData::PosType; private: ///atomic centers std::vector<AtomicOrbitals<ST>> AtomicCenters; ///table index int myTableID; ///mapping supercell to primitive cell std::vector<int> Super2Prim; ///r from distance table RealType dist_r; ///dr from distance table PosType dist_dr; ///for APBC PointType r_image; ///smooth function value RealType f; ///smooth function first derivative RealType df_dr; ///smooth function second derivative RealType d2f_dr2; ///smoothing schemes enum class smoothing_schemes { CONSISTENT = 0, SMOOTHALL, SMOOTHPARTIAL } smooth_scheme; /// smoothing function smoothing_functions smooth_func_id; public: HybridRepCenterOrbitals() {} void set_info(const ParticleSet& ions, ParticleSet& els, const std::vector<int>& mapping) { myTableID = els.addTable(ions, DT_SOA); Super2Prim = mapping; } inline void resizeStorage(size_t Nb) { size_t SplineCoefsBytes = 0; for (int ic = 0; 
ic < AtomicCenters.size(); ic++) { AtomicCenters[ic].resizeStorage(Nb); SplineCoefsBytes += AtomicCenters[ic].getSplineSizeInBytes(); } app_log() << "MEMORY " << SplineCoefsBytes / (1 << 20) << " MB allocated " << "for the atomic radial splines in hybrid orbital representation" << std::endl; } void bcast_tables(Communicate* comm) { for (int ic = 0; ic < AtomicCenters.size(); ic++) AtomicCenters[ic].bcast_tables(comm); } void gather_atomic_tables(Communicate* comm, std::vector<int>& offset) { if (comm->size() == 1) return; for (int ic = 0; ic < AtomicCenters.size(); ic++) AtomicCenters[ic].gather_tables(comm, offset); } inline void flush_zero() { for (int ic = 0; ic < AtomicCenters.size(); ic++) AtomicCenters[ic].flush_zero(); } bool read_splines(hdf_archive& h5f) { bool success = true; size_t ncenter; success = success && h5f.push("atomic_centers", false); success = success && h5f.readEntry(ncenter, "number_of_centers"); if (!success) return success; if (ncenter != AtomicCenters.size()) success = false; // read splines of each center for (int ic = 0; ic < AtomicCenters.size(); ic++) { std::ostringstream gname; gname << "center_" << ic; success = success && h5f.push(gname.str().c_str(), false); success = success && AtomicCenters[ic].read_splines(h5f); h5f.pop(); } h5f.pop(); return success; } bool write_splines(hdf_archive& h5f) { bool success = true; int ncenter = AtomicCenters.size(); success = success && h5f.push("atomic_centers", true); success = success && h5f.writeEntry(ncenter, "number_of_centers"); // write splines of each center for (int ic = 0; ic < AtomicCenters.size(); ic++) { std::ostringstream gname; gname << "center_" << ic; success = success && h5f.push(gname.str().c_str(), true); success = success && AtomicCenters[ic].write_splines(h5f); h5f.pop(); } h5f.pop(); return success; } template<typename Cell> inline int get_bc_sign(const PointType& r, const Cell& PrimLattice, TinyVector<int, D>& HalfG) { int bc_sign = 0; PointType shift_unit = 
PrimLattice.toUnit(r - r_image); for (int i = 0; i < D; i++) { ST img = round(shift_unit[i]); bc_sign += HalfG[i] * (int)img; } return bc_sign; } //evaluate only V template<typename VV> inline RealType evaluate_v(const ParticleSet& P, const int iat, VV& myV) { const auto& ei_dist = P.getDistTable(myTableID); const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat); if (center_idx < 0) abort(); auto& myCenter = AtomicCenters[Super2Prim[center_idx]]; if (dist_r < myCenter.getCutoff()) { PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]); r_image = myCenter.getCenterPos() + dr; myCenter.evaluate_v(dist_r, dr, myV); return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r); } return RealType(-1); } /* check if the batched algorithm is safe to operate * @param VP virtual particle set * @return true if it is safe * * When the reference electron in the NLPP evaluation has a distance larger than the non overlapping radius of the reference center. * Some qudrature points may get its SPOs evaluated from the nearest center which is not the reference center. * The batched algorthm forces the evaluation on the reference center and introduce some error. * In this case, the non-batched algorithm should be used. 
*/ bool is_batched_safe(const VirtualParticleSet& VP) { const int center_idx = VP.refSourcePtcl; auto& myCenter = AtomicCenters[Super2Prim[center_idx]]; return VP.refPS.getDistTable(myTableID).getDistRow(VP.refPtcl)[center_idx] < myCenter.getNonOverlappingRadius(); } // C2C, C2R cases template<typename VM> inline RealType evaluateValuesC2X(const VirtualParticleSet& VP, VM& multi_myV) { const int center_idx = VP.refSourcePtcl; dist_r = VP.refPS.getDistTable(myTableID).getDistRow(VP.refPtcl)[center_idx]; auto& myCenter = AtomicCenters[Super2Prim[center_idx]]; if (dist_r < myCenter.getCutoff()) { myCenter.evaluateValues(VP.getDistTable(myTableID).getDisplacements(), center_idx, dist_r, multi_myV); return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r); } return RealType(-1); } // R2R case template<typename VM, typename Cell, typename SV> inline RealType evaluateValuesR2R(const VirtualParticleSet& VP, const Cell& PrimLattice, TinyVector<int, D>& HalfG, VM& multi_myV, SV& bc_signs) { const int center_idx = VP.refSourcePtcl; dist_r = VP.refPS.getDistTable(myTableID).getDistRow(VP.refPtcl)[center_idx]; auto& myCenter = AtomicCenters[Super2Prim[center_idx]]; if (dist_r < myCenter.getCutoff()) { const auto& displ = VP.getDistTable(myTableID).getDisplacements(); for (int ivp = 0; ivp < VP.getTotalNum(); ivp++) { r_image = myCenter.getCenterPos() - displ[ivp][center_idx]; bc_signs[ivp] = get_bc_sign(VP.R[ivp], PrimLattice, HalfG); ; } myCenter.evaluateValues(displ, center_idx, dist_r, multi_myV); return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r); } return RealType(-1); } //evaluate only VGL template<typename VV, typename GV> inline RealType evaluate_vgl(const ParticleSet& P, const int iat, VV& myV, GV& myG, VV& myL) { const auto& ei_dist = P.getDistTable(myTableID); const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat); if (center_idx < 0) abort(); auto& myCenter = 
AtomicCenters[Super2Prim[center_idx]]; if (dist_r < myCenter.getCutoff()) { PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]); r_image = myCenter.getCenterPos() + dr; myCenter.evaluate_vgl(dist_r, dr, myV, myG, myL); return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r); } return RealType(-1); } //evaluate only VGH template<typename VV, typename GV, typename HT> inline RealType evaluate_vgh(const ParticleSet& P, const int iat, VV& myV, GV& myG, HT& myH) { const auto& ei_dist = P.getDistTable(myTableID); const int center_idx = ei_dist.get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl == iat); if (center_idx < 0) abort(); auto& myCenter = AtomicCenters[Super2Prim[center_idx]]; if (dist_r < myCenter.getCutoff()) { PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]); r_image = myCenter.getCenterPos() + dr; myCenter.evaluate_vgh(dist_r, dr, myV, myG, myH); return smooth_function(myCenter.getCutoffBuffer(), myCenter.getCutoff(), dist_r); } return RealType(-1); } // interpolate buffer region, value only template<typename VV> inline void interpolate_buffer_v(VV& psi, const VV& psi_AO) const { const RealType cone(1); for (size_t i = 0; i < psi.size(); i++) psi[i] = psi_AO[i] * f + psi[i] * (cone - f); } // interpolate buffer region, value, gradients and laplacian template<typename VV, typename GV> inline void interpolate_buffer_vgl(VV& psi, GV& dpsi, VV& d2psi, const VV& psi_AO, const GV& dpsi_AO, const VV& d2psi_AO) const { const RealType cone(1), ctwo(2); const RealType rinv(1.0 / dist_r); if (smooth_scheme == smoothing_schemes::CONSISTENT) for (size_t i = 0; i < psi.size(); i++) { // psi, dpsi, d2psi are all consistent d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f) + df_dr * rinv * ctwo * dot(dpsi[i] - dpsi_AO[i], dist_dr) + (psi_AO[i] - psi[i]) * (d2f_dr2 + ctwo * rinv * df_dr); dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f) + df_dr * rinv * dist_dr * (psi[i] - psi_AO[i]); psi[i] = psi_AO[i] * f + psi[i] * (cone - f); } else if 
(smooth_scheme == smoothing_schemes::SMOOTHALL) for (size_t i = 0; i < psi.size(); i++) { d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f); dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f); psi[i] = psi_AO[i] * f + psi[i] * (cone - f); } else if (smooth_scheme == smoothing_schemes::SMOOTHPARTIAL) for (size_t i = 0; i < psi.size(); i++) { // dpsi, d2psi are consistent but psi is not. d2psi[i] = d2psi_AO[i] * f + d2psi[i] * (cone - f) + df_dr * rinv * ctwo * dot(dpsi[i] - dpsi_AO[i], dist_dr); dpsi[i] = dpsi_AO[i] * f + dpsi[i] * (cone - f); psi[i] = psi_AO[i] * f + psi[i] * (cone - f); } else throw std::runtime_error("Unknown smooth scheme!"); } inline RealType smooth_function(const ST& cutoff_buffer, const ST& cutoff, const RealType r) { const RealType cone(1); if (r < cutoff_buffer) return cone; const RealType scale = cone / (cutoff - cutoff_buffer); const RealType x = (r - cutoff_buffer) * scale; f = smoothing(smooth_func_id, x, df_dr, d2f_dr2); df_dr *= scale; d2f_dr2 *= scale * scale; return f; } template<class BSPLINESPO> friend class HybridRepSetReader; }; } // namespace qmcplusplus #endif
saxpy_neon.c
/* * File: saxpy_neon.c * Author: Malcolm Davis * Course: Computer Architecture II * Created on May 12, 2018 * Simple SAXPY(Single-precision Alpha*X Plus Y) operation with OpenMP and NEON * * Ussage: * ./argv[0] for default parameters and random vectors or; * ./argv[0] <array size> */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <unistd.h> #include <arm_neon.h> #include <omp.h> #define FLOAT_RAND_MAX 10000 #define VECTOR_SIZE 100000 void generateIntVector(int16_t *vector, long size); void printIntVector(int16_t *vector, long size); int16_t* SAXPY(long size, int16_t *alpha, int16_t *X, int16_t *Y); /* * Main method, retrive command line options and run the saxpy */ int main(int argc, char const *argv[]) { const int printVectors = getenv("PRINT_VECTORS") ? 1 : 0; double start_time, run_time; srand(time(NULL)); // If the vector size is inserted then use it if not then use the default long size = argc > 1 && atol(argv[1]) > 0 ? atol(argv[1]) : VECTOR_SIZE; int alpha = ceil(((float)rand()/(float)(RAND_MAX)) * FLOAT_RAND_MAX); // Allocate memmory for the vectors int16_t* Y=(int16_t*)malloc(sizeof(int16_t)*size); int16_t* X=(int16_t*)malloc(sizeof(int16_t)*size); int16_t* A=(int16_t*)malloc(sizeof(int16_t)*size); // Generate random vectors generateIntVector(Y, size); generateIntVector(X, size); memset( A, alpha, size*sizeof(int16_t)); int16_t *result; result= SAXPY(size, A, X, Y); start_time = omp_get_wtime(); for (int j = 0; j < size; ++j){ result[j] = alpha*X[j] + Y[j]; } run_time = omp_get_wtime() - start_time; printf("\n Serial run time with size %ld: %f s \n", size, run_time); free(Y); free(X); free(A); return 0; } /* * SAXPY Function R = aX + Y * @param size the lenght of the vectors * @param alpha the const to scale the vector X * @param X a vector of floats * @param Y a vector of floats */ int16_t* SAXPY(long size, int16_t *alpha, int16_t *X, int16_t *Y) { int16_t *result = 
(int16_t*)malloc(sizeof(int16_t)*size); int i=0, j=0; int16x4_t vecY; int16x4_t vecA; int16x4_t vecX; int16x4_t r ; double start_time, runTime; start_time = omp_get_wtime(); #ifdef PARALLEL #pragma omp parallel for private(i, j) shared(size, alpha, X, Y) #endif for (j=0; j < size; j+=4) { vecY = vld1_s16(Y); vecA = vld1_s16(alpha); vecX = vld1_s16(X); r = vmla_s16(vecY, vecA, vecX); result[i++]=r[0]; result[i++]=r[1]; result[i++]=r[2]; result[i++]=r[3]; Y+=4; alpha+=4; X+=4; } runTime = omp_get_wtime() - start_time; #ifdef PARALLEL printf("\n Parallel run time with size %ld: %f s \n",size, runTime); #endif return result; } /* * Function that fills a vector of size "size" with random numbers * @param (INPUT)size the length of the vector * @param (OUTPUT)vector the place where the data will be stored. */ void generateIntVector(int16_t *vector, long size) { long i; #ifdef PARALLEL #pragma omp parallel for private(i) shared(size, vector) #endif for(i=0; i<size;i++){ vector[i] = ((float)rand()/(float)(RAND_MAX)) * FLOAT_RAND_MAX; } } /* * Function that prints a vector on screen * @param (INPUT)size the length of the vector * @param (INPUT)vector the place where the data will be stored. */ void printIntVector(int16_t *vector, long size) { printf("["); for(long i=0; i<size;i++){ printf(" %hd ", vector[i]); } printf("]\n"); }
omp_ssyr2k_batch.c
/** * @file omp_ssyr2k_batch.c * * @brief BBLAS omp_ssyr2k_batch float routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @generated from ./bblas_omp/omp_zsyr2k_batch.c normal z -> s, Mon Jun 6 09:44:14 2016 **/ #endif #include<cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define REAL /** Purpose ------- <b>ssyr2k_batch</b> is a batch version of ssyr2k. It performs one of the matrix-matrix operations arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i], or arrayC[i] = alpha[i]*arrayA**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i], where alpha[i] and beta[i] are scalars, arrayC[i] is an N[i] by N[i] sym- metric matrix and arrayA[i] and arrayB[i] are N[i] by K[i] matrices in the first case and K[i] by N[i] matrices in the second case. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of uplo[0], trans[0], N[0], K[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. 
On entry, uplo[i] specifies whether the upper or lower triangular part of the matrix arrayC[i] is to be referenced as follows: - = 'BblasUpper' Only the upper triangular part of the matrix is to be referenced. - = 'BblasLower' Only the lower triangular part of the matrix is to be referenced. @param[in] trans Array of <tt>enum BBLAS_TRANS</tt>. On entry, trans[i] specifies the operation to be performed as follows: - = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i] - = 'BblasTrans' arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i]. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of rows and columns of the matrix arrayC[i]. N[i] must be greater than zero. @param[in] K Array of <tt>int</tt>. On entry with trans[i] = 'BblasNoTrans', K[i] specifies the number of columns of the matrices arrayA[i] and arrayB[i], and upon entry with trans[i] = 'BblasTrans', K[i] specifies the number of rows of the matrices arrayA[i] and arrayB[i]. K[i] must be greater than zero. @param[in] alpha Array of <tt>real_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a REAL matrix of dimension lda[i] by Ka[i], where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise. Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i] part of the arrayA[i] must contain the elements of arrayA[i], otherwise the leading K[i] by N[i] part of the arrayA[i] must contain the elements of arrayA[i]. @param[in] lda Array of <tt>int</tt>. On entry, lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When transA[i] = BblasNoTrans then lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at least max( 1, K[i] ). @param[in] arrayB Array of pointers. 
Each element arrayB[i] is a pointer to a REAL matrix of dimension lda[i] by Ka[i], where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise. Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i] part of the arrayB[i] must contain the elements of arrayB[i], otherwise the leading K[i] by N[i] part of the arrayB[i] must contain the elements of arrayB[i]. @param[in] ldb Array of <tt>int</tt>. On entry, ldb[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When transA[i] = BblasNoTrans then ldb[i] must be at least max( 1, N[i] ), otherwise ldb[i] must be at least max( 1, K[i] ). @param[in] beta Array of <tt>real_16</tt>. When beta[i] is set to zero arrayC[i] need not be set on input. @param[in,out] arrayC Array of pointers. Each elements arrayC[i] is a pointer to a REAL matrix of dimension ldc[i] by N[i]. Before entry with uplo[i] = 'BblasUpper', the leading N[i] by N[i] upper triangular part of the arrayC[i] must con- tain the upper triangular part of the symmetric matrix and the strictly lower triangular part of arrayC[i] is not referenced. On exit, the upper triangular part of the arrayC[i] is overwritten by the upper triangular part of the updated matrix. Before entry with uplo[i] = 'BlasLower', the leading N[i] by N[i] lower triangular part of the arrayC[i] must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of arrayC[i] is not referenced. On exit, the lower triangular part of the arrayC[i] is overwritten by the lower triangular part of the updated matrix. @param[in] ldc Array of <tt>int</tt>. On entry, ldc[i] specifies the first dimension of arrayC[i] as declared in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ). @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. 
@param[out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith ssyr2k in the batch, these need not be set on entry. The error codes can be found in bblas_macros.h. **/ void omp_ssyr2k_batch( const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans, const int *N, const int *K, const float *alpha, const float **arrayA, const int *lda, const float **arrayB, const int *ldb, const float *beta, float **arrayC, const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info) { /*Local variables */ int first_index = 0; int batch_iter; int LDA, LDB; char func_name[15] = "ssyr2k_batch"; /* Check input arguments */ if (batch_count < 0) { xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1); } if (batch_opts == BBLAS_FIXED) { if ((uplo[first_index] != BblasUpper) && (uplo[first_index] != BblasLower)) { xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_UPLO; } return; } if ((trans[first_index] != BblasNoTrans) && (trans[first_index] != BblasTrans) && (trans[first_index] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_TRANS; } return; } if (N[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_N; } return; } if (K[first_index] < 0) { xerbla_batch(func_name, BBLAS_ERR_K, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_K; } return; } if (trans[first_index] == BblasNoTrans) { LDA = N[first_index]; LDB = N[first_index]; } else { LDA = K[first_index]; LDB = K[first_index]; } if (lda[first_index] < max(1,LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDA; 
} return; } if (ldb[first_index] < max(1, LDB)) { xerbla_batch(func_name, BBLAS_ERR_LDB, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDB; } return; } if (ldc[first_index] < max(1, N[first_index])) { xerbla_batch(func_name, BBLAS_ERR_LDC, first_index); for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_ERR_LDC; } return; } /* particular case */ if (N[first_index] == 0 || K[first_index] == 0 || (alpha[first_index] == (float)0.0 || beta[first_index] == (float)1.0)) { for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { info[batch_iter] = BBLAS_SUCCESS; } return; } #pragma omp parallel for private(batch_iter) for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { /*Call to cblas_ssyr2k */ cblas_ssyr2k( BblasColMajor, uplo[first_index], trans[first_index], N[first_index], K[first_index], (alpha[first_index]), arrayA[batch_iter], lda[first_index], arrayB[batch_iter], ldb[first_index], (beta[first_index]), arrayC[batch_iter], ldc[first_index]); /* Successful */ info[batch_iter] = BBLAS_SUCCESS; } /*END FIXED SIZE FOR LOOP */ }else if (batch_opts == BBLAS_VARIABLE) { #pragma omp parallel for private (batch_iter, LDA, LDB) for (batch_iter = 0; batch_iter < batch_count; batch_iter++) { /* Check input arguments */ if ((uplo[batch_iter] != BblasUpper) && (uplo[batch_iter] != BblasLower)) { xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter); info[batch_iter] = BBLAS_ERR_UPLO; continue; } if ((trans[batch_iter] != BblasNoTrans) && (trans[batch_iter] != BblasTrans) && (trans[batch_iter] != BblasConjTrans)) { xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter); info[batch_iter] = BBLAS_ERR_TRANS; continue; } if (N[batch_iter] < 0) { xerbla_batch(func_name, BBLAS_ERR_N, batch_iter); info[batch_iter] = BBLAS_ERR_N; continue; } if (K[batch_iter] < 0) { xerbla_batch(func_name, BBLAS_ERR_K, batch_iter); info[batch_iter] = BBLAS_ERR_K; continue; } if (trans[batch_iter] 
== BblasNoTrans) { LDA = N[batch_iter]; LDB = N[batch_iter]; } else { LDA = K[batch_iter]; LDB = K[batch_iter]; } if (lda[batch_iter] < max(1, LDA)) { xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter); info[batch_iter] = BBLAS_ERR_LDA; continue; } if (ldb[batch_iter] < max(1, LDB)) { xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter); info[batch_iter] = BBLAS_ERR_LDB; continue; } if (ldc[batch_iter] < max(1, N[batch_iter])) { xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter); info[batch_iter] = BBLAS_ERR_LDC; continue; } /* particular case */ if (N[batch_iter] == 0 || K[batch_iter] == 0 || ((alpha[batch_iter] == (float)0.0) && beta[batch_iter] == (float)1.0)) { info[batch_iter] = BBLAS_SUCCESS; continue; } cblas_ssyr2k( BblasColMajor, uplo[batch_iter], trans[batch_iter], N[batch_iter], K[batch_iter], (alpha[batch_iter]), arrayA[batch_iter], lda[batch_iter], arrayB[batch_iter], ldb[batch_iter], (beta[batch_iter]), arrayC[batch_iter], ldc[batch_iter]); /* Successful */ info[batch_iter] = BBLAS_SUCCESS; } }else { xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1); } } #undef REAL
simulation.h
//! \file simulation.h
//! \brief Variables/functions related to a running simulation

#ifndef OPENMC_SIMULATION_H
#define OPENMC_SIMULATION_H

#include "openmc/particle.h"

#include <cstdint>
#include <vector>

namespace openmc {

constexpr int STATUS_EXIT_NORMAL {0};
constexpr int STATUS_EXIT_MAX_BATCH {1};
constexpr int STATUS_EXIT_ON_TRIGGER {2};

//==============================================================================
// Global variable declarations
//==============================================================================

namespace simulation {

extern "C" int current_batch;     //!< current batch
extern "C" int current_gen;       //!< current fission generation
extern "C" int64_t current_work;  //!< index in source bank of current particle
extern "C" bool initialized;      //!< has simulation been initialized?
extern "C" double keff;           //!< average k over batches
extern "C" double keff_std;       //!< standard deviation of average k
extern "C" double k_col_abs;      //!< sum over batches of k_collision * k_absorption
extern "C" double k_col_tra;      //!< sum over batches of k_collision * k_tracklength
extern "C" double k_abs_tra;      //!< sum over batches of k_absorption * k_tracklength
extern double log_spacing;        //!< lethargy spacing for energy grid searches
extern "C" int n_lost_particles;  //!< cumulative number of lost particles
extern "C" bool need_depletion_rx; //!< need to calculate depletion rx?
extern "C" int restart_batch;     //!< batch at which a restart job resumed
extern "C" bool satisfy_triggers; //!< have tally triggers been satisfied?
extern "C" int total_gen; //!< total number of generations simulated extern double total_weight; //!< Total source weight in a batch extern int64_t work_per_rank; //!< number of particles per MPI rank extern std::vector<double> k_generation; extern std::vector<int64_t> work_index; // Threadprivate variables extern "C" bool trace; //!< flag to show debug information #pragma omp threadprivate(current_work, trace) } // namespace simulation //============================================================================== // Functions //============================================================================== //! Allocate space for source and fission banks void allocate_banks(); //! Determine number of particles to transport per process void calculate_work(); //! Initialize a batch void initialize_batch(); //! Initialize a fission generation void initialize_generation(); void initialize_history(Particle* p, int64_t index_source); //! Finalize a batch //! //! Handles synchronization and accumulation of tallies, calculation of Shannon //! entropy, getting single-batch estimate of keff, and turning on tallies when //! appropriate void finalize_batch(); //! Finalize a fission generation void finalize_generation(); //! Determine overall generation number extern "C" int overall_generation(); #ifdef OPENMC_MPI void broadcast_results(); #endif void free_memory_simulation(); } // namespace openmc #endif // OPENMC_SIMULATION_H
deadlock.c
#include <omp.h>
#include <stdio.h>
#include <unistd.h> /* sleep(); was missing -> implicit function declaration */

omp_lock_t A, B;

/* Task 1 acquires lock A, pauses, then tries to acquire lock B.
 * Together with T2 (which takes B then A) this is the classic
 * lock-order-inversion deadlock this demo intentionally exhibits. */
void T1(void)
{
  printf("Task 1: before locking A\n");
  omp_set_lock(&A);
  printf("Task 1: after locking A\n");

  sleep(2); /* widen the window so both tasks hold their first lock */

  printf("Task 1: before locking B\n");
  omp_set_lock(&B);
  printf("Task 1: after locking B\n");

  printf("Task 1\n");

  omp_unset_lock(&B);
  omp_unset_lock(&A);
}

/* Task 2 acquires the locks in the opposite order: B then A. */
void T2(void)
{
  printf("Task 2: before locking B\n");
  omp_set_lock(&B);
  printf("Task 2: after locking B\n");

  printf("Task 2: before locking A\n");
  omp_set_lock(&A);
  printf("Task 2: after locking A\n");

  printf("Task 2\n");

  omp_unset_lock(&A);
  omp_unset_lock(&B);
}

/* Spawn the two tasks from a single thread inside a parallel region.
 * With two or more threads the program is expected to deadlock. */
int main(void)
{
  omp_init_lock(&A);
  omp_init_lock(&B);

#pragma omp parallel
  {
#pragma omp single
    {
#pragma omp task
      T1();
#pragma omp task
      T2();
    }
  }

  /* Unreachable in practice (the tasks deadlock), but correct cleanup. */
  omp_destroy_lock(&A);
  omp_destroy_lock(&B);
  return 0;
}
GB_unop__identity_fc64_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_fc64_uint32)
// op(A') function: GB (_unop_tran__identity_fc64_uint32)

// C type:   GxB_FC64_t
// A type:   uint32_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc64_uint32)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // Ab == NULL: A is not bitmap, so every one of the anz entries is
        // present and the cast is applied unconditionally
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc64_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the entire transpose kernel body is generated by this include, driven
    // by the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
%
%  o For each histogram, successively apply the scale-space filter and
%    build an interval tree of zero crossings in the second derivative
%    at each scale.  Analyze this scale-space ``fingerprint'' to
%    determine which peaks and valleys in the histogram are most
%    predominant.
%
%  o The fingerprint defines intervals on the axis of the histogram.
%    Each interval contains either a minima or a maxima in the original
%    signal.  If each color component lies within the maxima interval,
%    that pixel is considered ``classified'' and is assigned a unique
%    class number.
%
%  o Any pixel that fails to be classified in the above thresholding
%    pass is classified using the fuzzy c-Means technique.  It is
%    assigned to one of the classes discovered in the histogram analysis
%    phase.
%
%  The fuzzy c-Means technique attempts to cluster a pixel by finding
%  the local minima of the generalized within group sum of squared error
%  objective function.  A pixel is assigned to the closest class of
%  which the fuzzy membership has a maximum value.
%
%  Segment is strongly based on software written by Andy Gallo,
%  University of Delaware.
%
%  The following reference was used in creating this program:
%
%    Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
%    Algorithm Based on the Thresholding and the Fuzzy c-Means
%    Techniques", Pattern Recognition, Volume 23, Number 9, pages
%    935-952, 1990.
% % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. 
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const MagickRealType cluster_threshold,
  const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag  "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExceptionInfo
    *exception;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickRealType
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register MagickRealType
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate cluster for every (red,green,blue) triple
    of peak intervals found in the three histograms.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  exception=(&image->exception);
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose extents (widened by SafeMargin) contain all three channel values.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; accumulate channel sums so the cluster center
              can be computed as the mean below.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): the threshold compares cluster->count against `count`, the
    running number of clusters retained so far (reset to 0 here), not a pixel
    total -- confirm this is the intended pruning rule.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated channel sums to means.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: bias the pointer by +255 so the table can
    be indexed by a channel difference in [-255,255], with squares[d] == d*d.
  */
  squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(MagickRealType) i*(MagickRealType) i;
  /*
    Allocate image colormap: one entry per surviving cluster, colored by the
    cluster's mean.
  */
  if (AcquireImageColormap(image,number_clusters) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes: threshold pass, then fuzzy c-means fallback for
    pixels that match no cluster's extents.
  */
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(indexes+x,0);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(q->red) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->red) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(indexes+x,cluster->id);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          MagickRealType
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: assign the pixel to the colormap entry
            with the largest membership value 1/sum.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
              (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->green)-
              (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->blue)-
              (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
                (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->green)-
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->blue)-
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(indexes+x,j);
              }
          }
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image);
  /*
    Relinquish resources (un-bias squares before freeing it).
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e C r o s s i n g s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings, working from the coarsest scale down;
    level i is corrected against level i+1.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* NOTE(review): the loop above stops at k > 0, so k < 0 appears
         unreachable here; check kept as written. */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e R e g i o n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima); the scan resumes from extents->index so
    successive calls enumerate successive regions.
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e H i s t o g r a m                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const MagickRealType *histogram,
%        MagickRealType *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of MagickRealTypes is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        MagickPixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold:  This MagickRealType represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    /* NOTE(review): extrema[i] is sized with sizeof(**histogram) (ssize_t),
       which is larger than sizeof(**extrema) (short) -- a harmless
       over-allocation, but presumably a typo worth confirming. */
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and derive the peak/valley fingerprint per channel.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /* NOTE(review): this early return leaks histogram[], extrema[],
               and any clusters already on the list -- consider goto-style
               cleanup. */
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /* NOTE(review): histogram[] and extrema[] leak on this path too. */
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; accumulate channel sums for the mean below.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): as in Classify(), the threshold compares against the running
    number of retained clusters, not a pixel total -- confirm intent.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as the background.
    NOTE(review): both scans start at head->next, so head itself is never
    considered when count > 1 -- confirm this is intentional.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The threshold is the midpoint between the object and background
        cluster centers, per channel.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e H i s t o g r a m                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram: zero all 256 bins of each channel, then bin every
    pixel's channel values (scaled to 0..255).
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e I n t e r v a l T r e e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

/*
  InitializeList() appends every leaf of the interval tree rooted at `node`
  (a node with no child) to `list`, advancing *number_nodes.  Siblings are
  visited before children; `list` must hold at least TreeLength entries.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;  /* leaf: record it */
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  MeanStability() sets each node's mean_stability to the arithmetic mean of
  its children's stability values (0.0 for childless nodes), recursing over
  the whole tree.  Stability() must have run first so child->stability is set.
*/
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register MagickRealType
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      /* count >= 1 here because child was non-NULL on entry */
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  Stability() sets each node's stability to the difference between its tau
  and its first child's tau; leaves get stability 0.0.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

/*
  InitializeIntervalTree() builds an interval tree from the zero-crossing
  lists: the root spans the full [0,255] histogram and, for each successive
  (coarser-to-finer) zero-crossing scale, every current leaf is split at the
  crossings found inside its [left,right] interval.  Returns NULL on
  allocation failure (all partial allocations are released).
*/
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so zero_crossing[i+1] walks entries 0..number_crossings */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: carve each leaf at this scale's zero crossings.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /* first split becomes the child; later splits chain as siblings */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
        }
      /* trailing interval [left, head->right] after the last crossing */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l   T a u                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%    MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
%      const double min_tau,const double delta_tau,
%      const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema: Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/*
  ActiveNodes() collects into `list` every node whose stability is at least
  the mean stability of its children; an active node's children are pruned
  (not descended into), otherwise both siblings and children are searched.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  FreeNodes() releases an entire interval tree (post-order over siblings and
  children).
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

/*
  OptimalTau() smooths the histogram at a range of Gaussian widths tau
  (max_tau down to min_tau in steps of delta_tau), records the zero
  crossings of each smoothed histogram's second derivative, builds an
  interval tree over those crossings, and marks the peaks/valleys of the
  stable intervals in `extrema` (positive index+? encoding for peaks,
  negative for valleys; an index of 0 is remapped to 256).  Returns the
  average tau over the active nodes, or 0.0 on allocation failure.
*/
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one slot per tau step plus the original
    histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);  /* -1.0 marks an unused slot */
  /*
    Initialize zero crossing list.
  */
  derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(MagickRealType *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram (tau == 0, no smoothing).
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
  second_derivative=(MagickRealType *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak: a -1 crossing at the right edge means the
      interval holds a maximum, otherwise a minimum.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;  /* 0 would be indistinguishable from "no extremum" */
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau/=(MagickRealType) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S c a l e S p a c e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleSpace() performs a scale-space filter on the 1D histogram.
%
%  The format of the ScaleSpace method is:
%
%    ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
%      MagickRealType *scale_histogram)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the
%      number of pixels for each intensity of a particular color component.
%
*/

/*
  ScaleSpace() convolves the 256-bin histogram with a Gaussian of width tau
  (alpha is the Gaussian normalization, beta the exponent scale), writing
  the smoothed result into scale_histogram.  The kernel table `gamma` is
  filled until a value drops below MagickEpsilon; remaining entries stay at
  the 0.0 they were pre-initialized with, truncating the kernel tail.
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));
  beta=(-1.0/(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    gamma[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;  /* tail is negligible; later entries remain 0.0 */
  }
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=(MagickRealType) (alpha*sum);
  }
  gamma=(double *) RelinquishMagickMemory(gamma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%    MagickBooleanType SegmentImage(Image *image,
%      const ColorspaceType colorspace,const MagickBooleanType verbose,
%      const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose: Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold: This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the
%      second derivative of the histogram.  As the value is increased, you
%      can expect a smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema (one 256-entry pair per dimension).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* unwind the allocations made so far, then raise the exception */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        /* no trailing semicolon: the macro supplies its own statement(s) */
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram in the requested colorspace, then locate the
    per-channel extrema (a zero smooth_threshold is promoted to 1.0).
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the original
    colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as: 1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%    ZeroCrossHistogram(MagickRealType *second_derivative,
%      const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing
%      the second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: parity remembers the sign of the last nonzero bin.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);  /* positive-to-negative crossing */
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;  /* negative-to-positive crossing */
          parity=(-1);
        }
  }
}
/* ===== file: elastic_kernel_3d_so4.c ===== */
/*
  NOTE(review): this file appears to be machine-generated (Devito-style
  `dataobj`/`profiler` structs, `Kernel` entry point) -- confirm before
  hand-editing the stencil expressions below.

  Kernel() advances a 3D elastic wave simulation with 4th-order spatial
  stencils: it first updates the three velocity fields from the stress
  tensor, then updates the six stress components from the velocities and
  injects masked sources, using time-tiled, OpenMP-parallel block loops.
  Wall-clock time of the whole sweep is accumulated into timers->section0.
*/
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic array carrier: data plus per-dimension size/padding metadata. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Timing accumulator for the single instrumented code section. */
struct profiler
{
  double section0;
};

int Kernel(struct dataobj *restrict block_sizes_vec, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_fxx_vec, struct dataobj *restrict save_src_fyy_vec, struct dataobj *restrict save_src_fzz_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict tau_sol_xx_vec, struct dataobj *restrict tau_sol_xy_vec, struct dataobj *restrict tau_sol_xz_vec, struct dataobj *restrict tau_sol_yy_vec, struct dataobj *restrict tau_sol_yz_vec, struct dataobj *restrict tau_sol_zz_vec, struct dataobj *restrict v_sol_x_vec, struct dataobj *restrict v_sol_y_vec, struct dataobj *restrict v_sol_z_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  /* Re-type the flat dataobj buffers as variably-modified multi-d arrays. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_fxx)[save_src_fxx_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fxx_vec->size[1]])save_src_fxx_vec->data;
  float(*restrict save_src_fyy)[save_src_fyy_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fyy_vec->size[1]])save_src_fyy_vec->data;
  float(*restrict save_src_fzz)[save_src_fzz_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fzz_vec->size[1]])save_src_fzz_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict tau_sol_xx)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]])tau_sol_xx_vec->data;
  float(*restrict tau_sol_xy)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]])tau_sol_xy_vec->data;
  float(*restrict tau_sol_xz)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]])tau_sol_xz_vec->data;
  float(*restrict tau_sol_yy)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]])tau_sol_yy_vec->data;
  float(*restrict tau_sol_yz)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]])tau_sol_yz_vec->data;
  float(*restrict tau_sol_zz)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]])tau_sol_zz_vec->data;
  float(*restrict v_sol_x)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]])v_sol_x_vec->data;
  float(*restrict v_sol_y)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]])v_sol_y_vec->data;
  float(*restrict v_sol_z)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]])v_sol_z_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Tile/block geometry supplied by the caller via block_sizes. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  int sf = 4;  /* skew factor between the time axis and the space tiles */
  int t_blk_size = 2 * sf * (time_M - time_m);
  /*
  int xb_size = 64;
  int yb_size = 64;
  x0_blk0_size = 8;
  y0_blk0_size = 8;
  */
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size , yb_size , x0_blk0_size, y0_blk0_size);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Space tiles are extended by sf*(time_M-time_m) to cover the skew. */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
    {
      //printf(" Change of outer xblock %d \n", xb);
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
      {
        /* t0/t1 alternate between the two time buffers of each field. */
        for (int time = t_blk, t0 = (time) % (2), t1 = (time + 1) % (2); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (2), t1 = (((time / sf) % (time_M - time_m + 1))) % (2))
        {
          /* tw: wrapped time index; used only by the trace printfs below. */
          int tw = ((time / sf) % (time_M - time_m + 1));
          /* --- Phase 1: velocity update from the t0 stress fields. --- */
#pragma omp parallel num_threads(nthreads)
          {
            //printf(" Change of time block : %d \n", tw);
#pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                //printf(" Change of inner xblock %d \n", x0_blk0);
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    //printf(" Updating velocity x %d \n", x - time + 4);
                    //printf(" \n PDE update : \n");
#pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      //printf(" Updating velocity x %d z: %d \n", x - time + 4, z + 4);
                      float r26 = 1.0 / h_z;
                      float r25 = 1.0 / h_y;
                      float r24 = 1.0 / h_x;
                      v_sol_x[t1][x - time + 4][y - time + 4][z + 4] = r24 * (2.7280354210856e-2F * (tau_sol_xx[t0][x - time + 3][y - time + 4][z + 4] - tau_sol_xx[t0][x - time + 6][y - time + 4][z + 4]) + 7.36569563735987e-1F * (-tau_sol_xx[t0][x - time + 4][y - time + 4][z + 4] + tau_sol_xx[t0][x - time + 5][y - time + 4][z + 4])) + r25 * (2.7280354210856e-2F * (tau_sol_xy[t0][x - time + 4][y - time + 2][z + 4] - tau_sol_xy[t0][x - time + 4][y - time + 5][z + 4]) + 7.36569563735987e-1F * (-tau_sol_xy[t0][x - time + 4][y - time + 3][z + 4] + tau_sol_xy[t0][x - time + 4][y - time + 4][z + 4])) + r26 * (2.7280354210856e-2F * (tau_sol_xz[t0][x - time + 4][y - time + 4][z + 2] - tau_sol_xz[t0][x - time + 4][y - time + 4][z + 5]) + 7.36569563735987e-1F * (-tau_sol_xz[t0][x - time + 4][y - time + 4][z + 3] + tau_sol_xz[t0][x - time + 4][y - time + 4][z + 4])) + v_sol_x[t0][x - time + 4][y - time + 4][z + 4];
                      v_sol_y[t1][x - time + 4][y - time + 4][z + 4] = r24 * (2.7280354210856e-2F * (tau_sol_xy[t0][x - time + 2][y - time + 4][z + 4] - tau_sol_xy[t0][x - time + 5][y - time + 4][z + 4]) + 7.36569563735987e-1F * (-tau_sol_xy[t0][x - time + 3][y - time + 4][z + 4] + tau_sol_xy[t0][x - time + 4][y - time + 4][z + 4])) + r25 * (2.7280354210856e-2F * (tau_sol_yy[t0][x - time + 4][y - time + 3][z + 4] - tau_sol_yy[t0][x - time + 4][y - time + 6][z + 4]) + 7.36569563735987e-1F * (-tau_sol_yy[t0][x - time + 4][y - time + 4][z + 4] + tau_sol_yy[t0][x - time + 4][y - time + 5][z + 4])) + r26 * (2.7280354210856e-2F * (tau_sol_yz[t0][x - time + 4][y - time + 4][z + 2] - tau_sol_yz[t0][x - time + 4][y - time + 4][z + 5]) + 7.36569563735987e-1F * (-tau_sol_yz[t0][x - time + 4][y - time + 4][z + 3] + tau_sol_yz[t0][x - time + 4][y - time + 4][z + 4])) + v_sol_y[t0][x - time + 4][y - time + 4][z + 4];
                      v_sol_z[t1][x - time + 4][y - time + 4][z + 4] = r24 * (2.7280354210856e-2F * (tau_sol_xz[t0][x - time + 2][y - time + 4][z + 4] - tau_sol_xz[t0][x - time + 5][y - time + 4][z + 4]) + 7.36569563735987e-1F * (-tau_sol_xz[t0][x - time + 3][y - time + 4][z + 4] + tau_sol_xz[t0][x - time + 4][y - time + 4][z + 4])) + r25 * (2.7280354210856e-2F * (tau_sol_yz[t0][x - time + 4][y - time + 2][z + 4] - tau_sol_yz[t0][x - time + 4][y - time + 5][z + 4]) + 7.36569563735987e-1F * (-tau_sol_yz[t0][x - time + 4][y - time + 3][z + 4] + tau_sol_yz[t0][x - time + 4][y - time + 4][z + 4])) + r26 * (2.7280354210856e-2F * (tau_sol_zz[t0][x - time + 4][y - time + 4][z + 3] - tau_sol_zz[t0][x - time + 4][y - time + 4][z + 6]) + 7.36569563735987e-1F * (-tau_sol_zz[t0][x - time + 4][y - time + 4][z + 4] + tau_sol_zz[t0][x - time + 4][y - time + 4][z + 5])) + v_sol_z[t0][x - time + 4][y - time + 4][z + 4];
                    }
                  }
                }
              }
            }
          }
          /* --- Phase 2: stress update from the freshly-written t1 velocity
             fields (blocks shifted by -2), plus masked source injection. --- */
#pragma omp parallel num_threads(nthreads)
          {
#pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb - 2); x0_blk0 <= +min((x_M + time), (xb - 2 + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb - 2); y0_blk0 <= +min((y_M + time), (yb - 2 + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb - 2 + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb - 2 + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    //printf(" Updating stress x %d \n", x - time + 4);
#pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      //printf(" Updating x %d z: %d \n", x - time + 4, z + 4);
                      /* r27..r41: shared velocity differences/derivatives
                         reused across the six stress updates below. */
                      float r41 = -v_sol_z[t1][x - time + 4][y - time + 4][z + 4];
                      float r40 = -v_sol_y[t1][x - time + 4][y - time + 4][z + 4];
                      float r39 = -v_sol_x[t1][x - time + 4][y - time + 4][z + 4];
                      float r38 = v_sol_y[t1][x - time + 4][y - time + 2][z + 4] - v_sol_y[t1][x - time + 4][y - time + 5][z + 4];
                      float r37 = -v_sol_y[t1][x - time + 4][y - time + 3][z + 4] + v_sol_y[t1][x - time + 4][y - time + 4][z + 4];
                      float r36 = v_sol_z[t1][x - time + 4][y - time + 4][z + 2] - v_sol_z[t1][x - time + 4][y - time + 4][z + 5];
                      float r35 = -v_sol_z[t1][x - time + 4][y - time + 4][z + 3] + v_sol_z[t1][x - time + 4][y - time + 4][z + 4];
                      float r34 = v_sol_x[t1][x - time + 2][y - time + 4][z + 4] - v_sol_x[t1][x - time + 5][y - time + 4][z + 4];
                      float r33 = -v_sol_x[t1][x - time + 3][y - time + 4][z + 4] + v_sol_x[t1][x - time + 4][y - time + 4][z + 4];
                      float r32 = 1.0 / h_y;
                      float r31 = 1.0 / h_z;
                      float r30 = 1.0 / h_x;
                      float r29 = r30 * (4.7729707730092F * r33 + 1.76776695286347e-1F * r34);
                      float r28 = r31 * (4.7729707730092F * r35 + 1.76776695286347e-1F * r36);
                      float r27 = r32 * (4.7729707730092F * r37 + 1.76776695286347e-1F * r38);
                      tau_sol_xx[t1][x - time + 4][y - time + 4][z + 4] = r27 + r28 + r30 * (9.54594154601839F * r33 + 3.53553390572694e-1F * r34) + tau_sol_xx[t0][x - time + 4][y - time + 4][z + 4];
                      tau_sol_xy[t1][x - time + 4][y - time + 4][z + 4] = r30 * (2.3864853865046F * (r40 + v_sol_y[t1][x - time + 5][y - time + 4][z + 4]) + 8.83883476431735e-2F * (v_sol_y[t1][x - time + 3][y - time + 4][z + 4] - v_sol_y[t1][x - time + 6][y - time + 4][z + 4])) + r32 * (2.3864853865046F * (r39 + v_sol_x[t1][x - time + 4][y - time + 5][z + 4]) + 8.83883476431735e-2F * (v_sol_x[t1][x - time + 4][y - time + 3][z + 4] - v_sol_x[t1][x - time + 4][y - time + 6][z + 4])) + tau_sol_xy[t0][x - time + 4][y - time + 4][z + 4];
                      tau_sol_xz[t1][x - time + 4][y - time + 4][z + 4] = r30 * (2.3864853865046F * (r41 + v_sol_z[t1][x - time + 5][y - time + 4][z + 4]) + 8.83883476431735e-2F * (v_sol_z[t1][x - time + 3][y - time + 4][z + 4] - v_sol_z[t1][x - time + 6][y - time + 4][z + 4])) + r31 * (2.3864853865046F * (r39 + v_sol_x[t1][x - time + 4][y - time + 4][z + 5]) + 8.83883476431735e-2F * (v_sol_x[t1][x - time + 4][y - time + 4][z + 3] - v_sol_x[t1][x - time + 4][y - time + 4][z + 6])) + tau_sol_xz[t0][x - time + 4][y - time + 4][z + 4];
                      tau_sol_yy[t1][x - time + 4][y - time + 4][z + 4] = r28 + r29 + r32 * (9.54594154601839F * r37 + 3.53553390572694e-1F * r38) + tau_sol_yy[t0][x - time + 4][y - time + 4][z + 4];
                      tau_sol_yz[t1][x - time + 4][y - time + 4][z + 4] = r31 * (2.3864853865046F * (r40 + v_sol_y[t1][x - time + 4][y - time + 4][z + 5]) + 8.83883476431735e-2F * (v_sol_y[t1][x - time + 4][y - time + 4][z + 3] - v_sol_y[t1][x - time + 4][y - time + 4][z + 6])) + r32 * (2.3864853865046F * (r41 + v_sol_z[t1][x - time + 4][y - time + 5][z + 4]) + 8.83883476431735e-2F * (v_sol_z[t1][x - time + 4][y - time + 3][z + 4] - v_sol_z[t1][x - time + 4][y - time + 6][z + 4])) + tau_sol_yz[t0][x - time + 4][y - time + 4][z + 4];
                      tau_sol_zz[t1][x - time + 4][y - time + 4][z + 4] = r27 + r29 + r31 * (9.54594154601839F * r35 + 3.53553390572694e-1F * r36) + tau_sol_zz[t0][x - time + 4][y - time + 4][z + 4];
                    }
                    /* Inject sources at the sparse z positions recorded for
                       this (x,y) column; the mask zeroes inactive points. */
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      //printf("\n Source_injection at : ");
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src_fxx[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      float r1 = save_src_fyy[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      float r2 = save_src_fzz[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      tau_sol_xx[t1][x - time + 4][y - time + 4][zind + 4] += r0;
                      tau_sol_yy[t1][x - time + 4][y - time + 4][zind + 4] += r1;
                      tau_sol_zz[t1][x - time + 4][y - time + 4][zind + 4] += r2;
                      //printf(" Time %d , at : %d, %d \n", tw, x - time + 4, zind + 4);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  return 0;
}
/* ===== file: GB_binop__iseq_fp64.c ===== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): every kernel body below is pulled in via a template #include;
// the macros in this header section parameterize those templates for the
// ISEQ operator on FP64 inputs.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__iseq_fp64
// A.*B function (eWiseMult):       GB_AemultB__iseq_fp64
// A*D function (colscale):         GB_AxD__iseq_fp64
// D*A function (rowscale):         GB_DxB__iseq_fp64
// C+=B function (dense accum):     GB_Cdense_accumB__iseq_fp64
// C+=b function (dense accum):     GB_Cdense_accumb__iseq_fp64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__iseq_fp64
// C=scalar+B                       GB_bind1st__iseq_fp64
// C=scalar+B'                      GB_bind1st_tran__iseq_fp64
// C=A+scalar                       GB_bind2nd__iseq_fp64
// C=A'+scalar                      GB_bind2nd_tran__iseq_fp64

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: note the comparison result is stored as a double,
// since ISEQ is the "IS" family (result has the operand type), not EQ (bool)
#define GB_BINOP(z, x, y) \
    z = (x == y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) — ISEQ has no CBLAS equivalent, so this macro is never expanded
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_FP64 || GxB_NO_ISEQ_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NOTE(review): compiled out for ISEQ — this dense-accum kernel is only
// generated for the operators listed below, so the generator emitted a
// disabled "(none)" stub here.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__iseq_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): generated artifact — this second return is unreachable
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__iseq_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    // aliasing Cx==Bx is safe: each Bx[p] is read before Cx[p] is written
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__iseq_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x == aij) ; \
}

GrB_Info GB_bind1st_tran__iseq_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for anything following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij == y) ; \
}

GrB_Info GB_bind2nd_tran__iseq_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
libomp_interface.h
// This file does not contain any code; it just contains additional text and formatting // for doxygen. //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// /*! @mainpage LLVM&nbsp; OpenMP* Runtime Library Interface @section sec_intro Introduction This document describes the interface provided by the LLVM &nbsp;OpenMP\other runtime library to the compiler. Routines that are directly called as simple functions by user code are not currently described here, since their definition is in the OpenMP specification available from http://openmp.org The aim here is to explain the interface from the compiler to the runtime. The overall design is described, and each function in the interface has its own description. (At least, that's the ambition, we may not be there yet). @section sec_building Quickly Building the Runtime For the impatient, we cover building the runtime as the first topic here. CMake is used to build the OpenMP runtime. For details and a full list of options for the CMake build system, see <tt>Build_With_CMake.txt</tt> inside the <tt>runtime/</tt> subdirectory. These instructions will provide the most typical build. In-LLVM-tree build:. 
@code $ cd where-you-want-to-live Check out openmp into llvm/projects $ cd where-you-want-to-build $ mkdir build && cd build $ cmake path/to/llvm -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler> $ make omp @endcode Out-of-LLVM-tree build: @code $ cd where-you-want-to-live Check out openmp $ cd where-you-want-to-live/openmp/runtime $ mkdir build && cd build $ cmake path/to/openmp -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler> $ make @endcode @section sec_supported Supported RTL Build Configurations The architectures supported are IA-32 architecture, Intel&reg;&nbsp; 64, and Intel&reg;&nbsp; Many Integrated Core Architecture. The build configurations supported are shown in the table below. <table border=1> <tr><th> <th>icc/icl<th>gcc<th>clang <tr><td>Linux\other OS<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7) <tr><td>FreeBSD\other<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7,8) <tr><td>OS X\other<td>Yes(1,3,4)<td>No<td>Yes(4,6,7) <tr><td>Windows\other OS<td>Yes(1,4)<td>No<td>No </table> (1) On IA-32 architecture and Intel&reg;&nbsp; 64, icc/icl versions 12.x are supported (12.1 is recommended).<br> (2) gcc version 4.7 is supported.<br> (3) For icc on OS X\other, OS X\other version 10.5.8 is supported.<br> (4) Intel&reg;&nbsp; Many Integrated Core Architecture not supported.<br> (5) On Intel&reg;&nbsp; Many Integrated Core Architecture, icc/icl versions 13.0 or later are required.<br> (6) Clang\other version 3.3 is supported.<br> (7) Clang\other currently does not offer a software-implemented 128 bit extended precision type. Thus, all entry points reliant on this type are removed from the library and cannot be called in the user program. The following functions are not available: @code __kmpc_atomic_cmplx16_* __kmpc_atomic_float16_* __kmpc_atomic_*_fp @endcode (8) Community contribution provided AS IS, not tested by Intel. 
Supported Architectures: IBM(R) Power 7 and Power 8 <table border=1> <tr><th> <th>gcc<th>clang <tr><td>Linux\other OS<td>Yes(1,2)<td>Yes(3,4) </table> (1) On Power 7, gcc version 4.8.2 is supported.<br> (2) On Power 8, gcc version 4.8.2 is supported.<br> (3) On Power 7, clang version 3.7 is supported.<br> (4) On Power 8, clang version 3.7 is supported.<br> @section sec_frontend Front-end Compilers that work with this RTL The following compilers are known to do compatible code generation for this RTL: icc/icl, gcc. Code generation is discussed in more detail later in this document. @section sec_outlining Outlining The runtime interface is based on the idea that the compiler "outlines" sections of code that are to run in parallel into separate functions that can then be invoked in multiple threads. For instance, simple code like this @code void foo() { #pragma omp parallel { ... do something ... } } @endcode is converted into something that looks conceptually like this (where the names used are merely illustrative; the real library function names will be used later after we've discussed some more issues...) @code static void outlinedFooBody() { ... do something ... } void foo() { __OMP_runtime_fork(outlinedFooBody, (void*)0); // Not the real function name! } @endcode @subsection SEC_SHAREDVARS Addressing shared variables In real uses of the OpenMP\other API there are normally references from the outlined code to shared variables that are in scope in the containing function. Therefore the containing function must be able to address these variables. The runtime supports two alternate ways of doing this. @subsubsection SEC_SEC_OT Current Technique The technique currently supported by the runtime library is to receive a separate pointer to each shared variable that can be accessed from the outlined function. This is what is shown in the example below. We hope soon to provide an alternative interface to support the alternate implementation described in the next section. 
The alternative implementation has performance advantages for small parallel regions that have many shared variables. @subsubsection SEC_SEC_PT Future Technique The idea is to treat the outlined function as though it were a lexically nested function, and pass it a single argument which is the pointer to the parent's stack frame. Provided that the compiler knows the layout of the parent frame when it is generating the outlined function it can then access the up-level variables at appropriate offsets from the parent frame. This is a classical compiler technique from the 1960s to support languages like Algol (and its descendants) that support lexically nested functions. The main benefit of this technique is that there is no code required at the fork point to marshal the arguments to the outlined function. Since the runtime knows statically how many arguments must be passed to the outlined function, it can easily copy them to the thread's stack frame. Therefore the performance of the fork code is independent of the number of shared variables that are accessed by the outlined function. If it is hard to determine the stack layout of the parent while generating the outlined code, it is still possible to use this approach by collecting all of the variables in the parent that are accessed from outlined functions into a single `struct` which is placed on the stack, and whose address is passed to the outlined functions. In this way the offsets of the shared variables are known (since they are inside the struct) without needing to know the complete layout of the parent stack-frame. From the point of view of the runtime either of these techniques is equivalent, since in either case it only has to pass a single argument to the outlined function to allow it to access shared variables. A scheme like this is how gcc\other generates outlined functions. 
@section SEC_INTERFACES Library Interfaces The library functions used for specific parts of the OpenMP\other language implementation are documented in different modules. - @ref BASIC_TYPES fundamental types used by the runtime in many places - @ref DEPRECATED functions that are in the library but are no longer required - @ref STARTUP_SHUTDOWN functions for initializing and finalizing the runtime - @ref PARALLEL functions for implementing `omp parallel` - @ref THREAD_STATES functions for supporting thread state inquiries - @ref WORK_SHARING functions for work sharing constructs such as `omp for`, `omp sections` - @ref THREADPRIVATE functions to support thread private data, copyin etc - @ref SYNCHRONIZATION functions to support `omp critical`, `omp barrier`, `omp master`, reductions etc - @ref ATOMIC_OPS functions to support atomic operations - @ref STATS_GATHERING macros to support developer profiling of libomp - Documentation on tasking has still to be written... @section SEC_EXAMPLES Examples @subsection SEC_WORKSHARING_EXAMPLE Work Sharing Example This example shows the code generated for a parallel for with reduction and dynamic scheduling. @code extern float foo( void ); int main () { int i; float r = 0.0; #pragma omp parallel for schedule(dynamic) reduction(+:r) for ( i = 0; i < 10; i ++ ) { r += foo(); } } @endcode The transformed code looks like this. @code extern float foo( void ); int main () { static int zero = 0; auto int gtid; auto float r = 0.0; __kmpc_begin( & loc3, 0 ); // The gtid is not actually required in this example so could be omitted; // We show its initialization here because it is often required for calls into // the runtime and should be locally cached like this. 
    gtid = __kmpc_global_thread_num( & loc3 );
    __kmpc_fork_call( & loc7, 1, main_7_parallel_3, & r );
    __kmpc_end( & loc0 );
    return 0;
}

struct main_10_reduction_t_5 { float r_10_rpr; };

static kmp_critical_name lck = { 0 };
static ident_t loc10; // loc10.flags should contain KMP_IDENT_ATOMIC_REDUCE bit set
                      // if compiler has generated an atomic reduction.

void main_7_parallel_3( int *gtid, int *btid, float *r_7_shp ) {
    auto int i_7_pr;
    auto int lower, upper, liter, incr;
    auto struct main_10_reduction_t_5 reduce;
    reduce.r_10_rpr = 0.F;
    liter = 0;
    __kmpc_dispatch_init_4( & loc7,*gtid, 35, 0, 9, 1, 1 );
    while ( __kmpc_dispatch_next_4( & loc7, *gtid, & liter, & lower, & upper, & incr ) ) {
        for( i_7_pr = lower; upper >= i_7_pr; i_7_pr ++ )
            reduce.r_10_rpr += foo();
    }
    switch( __kmpc_reduce_nowait( & loc10, *gtid, 1, 4, & reduce, main_10_reduce_5, & lck ) ) {
        case 1:
           *r_7_shp += reduce.r_10_rpr;
           __kmpc_end_reduce_nowait( & loc10, *gtid, & lck );
           break;
        case 2:
           __kmpc_atomic_float4_add( & loc10, *gtid, r_7_shp, reduce.r_10_rpr );
           break;
        default:;
    }
}

void main_10_reduce_5( struct main_10_reduction_t_5 *reduce_lhs, struct main_10_reduction_t_5 *reduce_rhs ) {
    reduce_lhs->r_10_rpr += reduce_rhs->r_10_rpr;
}
@endcode

@defgroup BASIC_TYPES Basic Types
Types that are used throughout the runtime.

@defgroup DEPRECATED Deprecated Functions
Functions in this group are for backwards compatibility only, and
should not be used in new code.

@defgroup STARTUP_SHUTDOWN Startup and Shutdown
These functions are for library initialization and shutdown.

@defgroup PARALLEL Parallel (fork/join)
These functions are used for implementing <tt>\#pragma omp parallel</tt>.

@defgroup THREAD_STATES Thread Information
These functions return information about the currently executing thread.

@defgroup WORK_SHARING Work Sharing
These functions are used for implementing
<tt>\#pragma omp for</tt>, <tt>\#pragma omp sections</tt>, <tt>\#pragma omp single</tt> and
<tt>\#pragma omp master</tt> constructs.
When handling loops, there are different functions for each of the signed and unsigned 32 and 64 bit integer types which have the name suffixes `_4`, `_4u`, `_8` and `_8u`. The semantics of each of the functions is the same, so they are only described once. Static loop scheduling is handled by @ref __kmpc_for_static_init_4 and friends. Only a single call is needed, since the iterations to be executed by any give thread can be determined as soon as the loop parameters are known. Dynamic scheduling is handled by the @ref __kmpc_dispatch_init_4 and @ref __kmpc_dispatch_next_4 functions. The init function is called once in each thread outside the loop, while the next function is called each time that the previous chunk of work has been exhausted. @defgroup SYNCHRONIZATION Synchronization These functions are used for implementing barriers. @defgroup THREADPRIVATE Thread private data support These functions support copyin/out and thread private data. @defgroup STATS_GATHERING Statistics Gathering from OMPTB These macros support profiling the libomp library. Use --stats=on when building with build.pl to enable and then use the KMP_* macros to profile (through counts or clock ticks) libomp during execution of an OpenMP program. @section sec_stats_env_vars Environment Variables This section describes the environment variables relevant to stats-gathering in libomp @code KMP_STATS_FILE @endcode This environment variable is set to an output filename that will be appended *NOT OVERWRITTEN* if it exists. If this environment variable is undefined, the statistics will be output to stderr @code KMP_STATS_THREADS @endcode This environment variable indicates to print thread-specific statistics as well as aggregate statistics. Each thread's statistics will be shown as well as the collective sum of all threads. The values "true", "on", "1", "yes" will all indicate to print per thread statistics. @defgroup TASKING Tasking support These functions support tasking constructs. 
@defgroup USER User visible functions These functions can be called directly by the user, but are runtime library specific, rather than being OpenMP interfaces. */
a4.c
#include <stdio.h>

#define N 10000

int a[N], b[N], c[N];
long long s = 0;

/*
 * Fills a[] with 1 and b[] with 2, then in parallel computes
 * b[i] += a[i] followed by c[i] += b[i], and prints a checksum of
 * each result array.  Returns 0.
 */
int main(void)
{
    int i;

    /* initialization, not in parallel */
    for (i = 0; i < N; i++) { a[i] = 1; b[i] = 2; c[i] = 0; }

#pragma omp parallel
    {
        /* No 'nowait' on this first loop: the next loop reads the b[]
           values this loop writes.  Without the implied barrier the two
           loops' (implementation-defined) default schedules need not
           assign the same iterations to the same thread — a data race. */
#pragma omp for
        for (i = 0; i < N; i++) b[i] += a[i];
        /* 'nowait' is safe here: the parallel region's closing barrier
           follows immediately. */
#pragma omp for nowait
        for (i = 0; i < N; i++) c[i] += b[i];
    }

    /* checksum of b[] (expected: every element 3, total 3*N) */
    s = 0;
    for (i = 0; i < N; i++) s += b[i];
    /* %lld: s is long long — %ld is undefined behavior where long is 32-bit */
    printf("Valor %d, de b %d suma total: %lld\n", i - 1, b[i - 1], s);

    /* checksum of c[] — reduction(+:s) avoids the unsynchronized
       concurrent updates to the shared s */
    s = 0;
#pragma omp parallel for reduction(+:s)
    for (i = 0; i < N; i++) s += c[i];
    printf("Valor %d, de c %d suma total: %lld\n", i - 1, c[i - 1], s);

    return 0;
}
DRB089-dynamic-storage2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* For the case of a variable which is referenced within a construct:
objects with dynamic storage duration should be shared.

Putting it within a threadprivate directive may cause seg fault
since threadprivate copies are not allocated.

Dependence pair: *counter@73:7 vs. *counter@73:7
*/
#include<stdio.h>
#include<stdlib.h>

/* Heap-allocated counter with dynamic storage duration: shared by all
   threads in the parallel region below. */
int* counter;
/* Kept commented out on purpose: enabling it would make each thread's
   'counter' an unallocated threadprivate copy (likely segfault). */
//#pragma omp threadprivate(counter)

int main()
{
  counter = (int*) malloc(sizeof(int));
  if (counter== NULL)
  {
    /* NOTE(review): "failes" is a typo for "fails"; message left as-is to
       keep this benchmark's output byte-identical. */
    fprintf(stderr, "malloc() failes\n");
    exit(1);
  }
  *counter = 0;
#pragma omp parallel
  {
    /* INTENTIONAL data race (this is a DataRaceBench "yes" case): all
       threads increment the shared *counter without synchronization.
       Do not "fix" — detectors are expected to flag this line. */
    (*counter)++;
  }
  /* Result is nondeterministic: anywhere from 1 up to the thread count. */
  printf("%d \n", *counter);
  free (counter);
  return 0;
}
GB_unop__minv_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): the transpose kernel body is pulled in via a template
// #include; the macros below parameterize it for MINV on double complex.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__minv_fc64_fc64)
// op(A') function:  GB (_unop_tran__minv_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_FC64_minv (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse (1/x) in double complex
#define GB_OP(z, x) \
    z = GB_FC64_minv (x) ;

// casting (identity cast: A and C have the same type here)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_FC64_minv (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__minv_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // full/sparse case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op: a straight parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_FC64_minv (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_FC64_minv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__minv_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
#include<stdio.h>
#include<omp.h>

/*
 * 1-D linear convection (wave) equation, solved with a first-order upwind
 * scheme and parallelized with OpenMP.
 *
 * Fix: the compute loop previously carried a "nowait" clause.  That removed
 * the barrier between computing u_new[] (which reads u[]) and copying
 * u_new[] back into u[], so a fast thread could overwrite u[i] while another
 * thread was still reading it -- a data race.  The implicit barrier between
 * the two worksharing loops is now kept.
 */
int main(){
    // Define the domain
    float x_len = 2.0;                    // Length of the domain
    int x_points = 101;                   // Number of grid points
    float del_x = x_len/(x_points-1);     // Length of an element
    float x[x_points];

    #pragma omp parallel for
    for (int i = 0; i < x_points; i++){
        x[i] = i * del_x;                 // x co-ordinates
    }

    printf("\n The value of x \n");
    for (int i = 0; i < x_points; i++){
        printf("%f \t", x[i]);
    }

    // Define the parameters
    int t_itrs = 2500;      // number of time iterations
    float del_t = 0.001;    // time step
    float c = 1.0;          // speed of wave

    float u[x_points];      // Velocity at current time
    float u_new[x_points];  // Velocity at next time interval

    // Square-wave initial condition: u = 2 on (0.5, 1.0), u = 1 elsewhere.
    for (int i = 0; i < x_points; i++){
        if (x[i] > 0.5 && x[i] < 1.0){
            u[i] = 2.0;
            u_new[i] = 2.0;
        }
        else{
            u[i] = 1.0;
            u_new[i] = 1.0;
        }
    }

    printf("\n The initial value of u is \n");
    for (int i = 0; i < x_points; i++){
        printf("%f \t", u[i]);
    }

    // Time stepping.  Every thread executes the (redundant) time loop; the
    // two inner worksharing loops split the spatial work.  The implicit
    // barrier at the end of each loop keeps the compute phase and the copy
    // phase strictly separated across threads.
    #pragma omp parallel
    for (int it = 0; it < t_itrs; it++){
        // Upwind update: u_new[i] depends on u[i] and u[i-1]; u[0] is the
        // fixed inflow boundary and is never updated.
        #pragma omp for
        for (int i = 1; i < x_points; i++){
            u_new[i] = u[i] - (c*del_t/del_x)*(u[i] - u[i-1]);
        }
        // Commit the new time level (barrier here keeps iterations in
        // lock-step across threads).
        #pragma omp for
        for (int i = 0; i < x_points; i++){
            u[i] = u_new[i];
        }
    }

    printf("\n The value of u at the end of the iterations \n");
    for (int i = 0; i < x_points; i++){
        printf("%f \t", u[i]);
    }

    return 0;
}
/* ========================== sort.c ========================== */
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ /* * this program uses an algorithm that we call `cilksort'. * The algorithm is essentially mergesort: * * cilksort(in[1..n]) = * spawn cilksort(in[1..n/2], tmp[1..n/2]) * spawn cilksort(in[n/2..n], tmp[n/2..n]) * sync * spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n]) * * * The procedure cilkmerge does the following: * * cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) = * find the median of A \union B using binary * search. The binary search gives a pair * (ma, mb) such that ma + mb = (n + m)/2 * and all elements in A[1..ma] are smaller than * B[mb..m], and all the B[1..mb] are smaller * than all elements in A[ma..n]. 
* * spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2]) * spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)]) * sync * * The algorithm appears for the first time (AFAIK) in S. G. Akl and * N. Santoro, "Optimal Parallel Merging and Sorting Without Memory * Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The * paper does not express the algorithm using recursion, but the * idea of finding the median is there. * * For cilksort of n elements, T_1 = O(n log n) and * T_\infty = O(log^3 n). There is a way to shave a * log factor in the critical path (left as homework). */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <omp.h> #include "BenchmarksUtil.h" typedef long ELM; ELM *array, *tmp, *seq, *tmpseq; static unsigned long rand_nxt = 0; int size = 32 * 1024 * 1024; int sequential_merge_cutoff =2048; int quicksort_cutoff = 2048; int insertion_cutoff = 20; static inline unsigned long my_rand(void) { rand_nxt = rand_nxt * 1103515245 + 12345; return rand_nxt; } static inline void my_srand(unsigned long seed) { rand_nxt = seed; } static inline ELM med3(ELM a, ELM b, ELM c) { if (a < b) { if (b < c) { return b; } else { if (a < c) return c; else return a; } } else { if (b > c) { return b; } else { if (a > c) return c; else return a; } } } /* * simple approach for now; a better median-finding * may be preferable */ static inline ELM choose_pivot(ELM *low, ELM *high) { return med3(*low, *high, low[(high - low) / 2]); } static ELM *seqpart(ELM *low, ELM *high) { ELM pivot; ELM h, l; ELM *curr_low = low; ELM *curr_high = high; pivot = choose_pivot(low, high); while (1) { while ((h = *curr_high) > pivot) curr_high--; while ((l = *curr_low) < pivot) curr_low++; if (curr_low >= curr_high) break; *curr_high-- = l; *curr_low++ = h; } /* * I don't know if this is really necessary. * The problem is that the pivot is not always the * first element, and the partition may be trivial. 
* However, if the partition is trivial, then * *high is the largest element, whence the following * code. */ if (curr_high < high) return curr_high; else return curr_high - 1; } #define swap(a, b) \ { \ ELM tmp;\ tmp = a;\ a = b;\ b = tmp;\ } static void insertion_sort(ELM *low, ELM *high) { ELM *p, *q; ELM a, b; for (q = low + 1; q <= high; ++q) { a = q[0]; for (p = q - 1; p >= low && (b = p[0]) > a; p--) p[1] = b; p[1] = a; } } /* * tail-recursive quicksort, almost unrecognizable :-) */ void seqquick(ELM *low, ELM *high) { ELM *p; while (high - low >= insertion_cutoff) { p = seqpart(low, high); seqquick(low, p); low = p + 1; } insertion_sort(low, high); } void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { ELM a1, a2; /* * The following 'if' statement is not necessary * for the correctness of the algorithm, and is * in fact subsumed by the rest of the function. * However, it is a few percent faster. Here is why. * * The merging loop below has something like * if (a1 < a2) { * *dest++ = a1; * ++low1; * if (end of array) break; * a1 = *low1; * } * * Now, a1 is needed immediately in the next iteration * and there is no way to mask the latency of the load. * A better approach is to load a1 *before* the end-of-array * check; the problem is that we may be speculatively * loading an element out of range. While this is * probably not a problem in practice, yet I don't feel * comfortable with an incorrect algorithm. Therefore, * I use the 'fast' loop on the array (except for the last * element) and the 'slow' loop for the rest, saving both * performance and correctness. 
*/ if (low1 < high1 && low2 < high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; a1 = *++low1; if (low1 >= high1) break; } else { *lowdest++ = a2; a2 = *++low2; if (low2 >= high2) break; } } } if (low1 <= high1 && low2 <= high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; ++low1; if (low1 > high1) break; a1 = *low1; } else { *lowdest++ = a2; ++low2; if (low2 > high2) break; a2 = *low2; } } } if (low1 > high1) { memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1)); } else { memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1)); } } #define swap_indices(a, b) \ { \ ELM *tmp;\ tmp = a;\ a = b;\ b = tmp;\ } ELM *binsplit(ELM val, ELM *low, ELM *high) { /* * returns index which contains greatest element <= val. If val is * less than all elements, returns low-1 */ ELM *mid; while (low != high) { mid = low + ((high - low + 1) >> 1); if (val <= *mid) high = mid - 1; else low = mid; } if (*low > val) return low - 1; else return low; } void cilkmerge_seq(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { /* * Cilkmerge: Merges range [low1, high1] with range [low2, high2] * into the range [lowdest, ...] */ ELM *split1, *split2; /* * where each of the ranges are broken for * recursive merge */ long int lowsize; /* * total size of lower halves of two * ranges - 2 */ /* * We want to take the middle element (indexed by split1) from the * larger of the two arrays. The following code assumes that split1 * is taken from range [low1, high1]. So if [low1, high1] is * actually the smaller range, we should swap it with [low2, high2] */ if (high2 - low2 > high1 - low1) { swap_indices(low1, low2); swap_indices(high1, high2); } if (high2 < low2) { /* smaller range is empty */ memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1)); return; } if (high2 - low2 < sequential_merge_cutoff ) { seqmerge(low1, high1, low2, high2, lowdest); return; } /* * Basic approach: Find the middle element of one range (indexed by * split1). 
Find where this element would fit in the other range * (indexed by split 2). Then merge the two lower halves and the two * upper halves. */ split1 = ((high1 - low1 + 1) / 2) + low1; split2 = binsplit(*split1, low2, high2); lowsize = split1 - low1 + split2 - low2; /* * directly put the splitting element into * the appropriate location */ *(lowdest + lowsize + 1) = *split1; cilkmerge_seq(low1, split1 - 1, low2, split2, lowdest); cilkmerge_seq(split1 + 1, high1, split2 + 1, high2, lowdest + lowsize + 2); return; } void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { /* * Cilkmerge: Merges range [low1, high1] with range [low2, high2] * into the range [lowdest, ...] */ ELM *split1, *split2; /* * where each of the ranges are broken for * recursive merge */ long int lowsize; /* * total size of lower halves of two * ranges - 2 */ /* * We want to take the middle element (indexed by split1) from the * larger of the two arrays. The following code assumes that split1 * is taken from range [low1, high1]. So if [low1, high1] is * actually the smaller range, we should swap it with [low2, high2] */ if (high2 - low2 > high1 - low1) { swap_indices(low1, low2); swap_indices(high1, high2); } if (high2 < low2) { /* smaller range is empty */ memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1)); return; } if (high2 - low2 < sequential_merge_cutoff ) { seqmerge(low1, high1, low2, high2, lowdest); return; } /* * Basic approach: Find the middle element of one range (indexed by * split1). Find where this element would fit in the other range * (indexed by split 2). Then merge the two lower halves and the two * upper halves. 
*/ split1 = ((high1 - low1 + 1) / 2) + low1; split2 = binsplit(*split1, low2, high2); lowsize = split1 - low1 + split2 - low2; /* * directly put the splitting element into * the appropriate location */ *(lowdest + lowsize + 1) = *split1; #pragma omp task untied cilkmerge_par(low1, split1 - 1, low2, split2, lowdest); #pragma omp task untied cilkmerge_par(split1 + 1, high1, split2 + 1, high2, lowdest + lowsize + 2); #pragma omp taskwait return; } void cilksort_seq(ELM *low, ELM *tmp, long size) { /* * divide the input in four parts of the same size (A, B, C, D) * Then: * 1) recursively sort A, B, C, and D (in parallel) * 2) merge A and B into tmp1, and C and D into tmp2 (in parallel) * 3) merge tmp1 and tmp2 into the original array */ long quarter = size / 4; ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD; if (size < quicksort_cutoff ) { /* quicksort when less than 1024 elements */ seqquick(low, low + size - 1); return; } A = low; tmpA = tmp; B = A + quarter; tmpB = tmpA + quarter; C = B + quarter; tmpC = tmpB + quarter; D = C + quarter; tmpD = tmpC + quarter; cilksort_seq(A, tmpA, quarter); cilksort_seq(B, tmpB, quarter); cilksort_seq(C, tmpC, quarter); cilksort_seq(D, tmpD, size - 3 * quarter); cilkmerge_seq(A, A + quarter - 1, B, B + quarter - 1, tmpA); cilkmerge_seq(C, C + quarter - 1, D, low + size - 1, tmpC); cilkmerge_seq(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A); } void cilksort_par(ELM *low, ELM *tmp, long size) { /* * divide the input in four parts of the same size (A, B, C, D) * Then: * 1) recursively sort A, B, C, and D (in parallel) * 2) merge A and B into tmp1, and C and D into tmp2 (in parallel) * 3) merge tmp1 and tmp2 into the original array */ long quarter = size / 4; ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD; if (size < quicksort_cutoff ) { /* quicksort when less than 1024 elements */ seqquick(low, low + size - 1); return; } A = low; tmpA = tmp; B = A + quarter; tmpB = tmpA + quarter; C = B + quarter; tmpC = tmpB + quarter; D = C + quarter; 
tmpD = tmpC + quarter; #pragma omp task untied cilksort_par(A, tmpA, quarter); #pragma omp task untied cilksort_par(B, tmpB, quarter); #pragma omp task untied cilksort_par(C, tmpC, quarter); #pragma omp task untied cilksort_par(D, tmpD, size - 3 * quarter); #pragma omp taskwait #pragma omp task untied cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA); #pragma omp task untied cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC); #pragma omp taskwait cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A); } void scramble_array( ELM *array ) { unsigned long i; unsigned long j; for (i = 0; i < size; ++i) { j = my_rand(); j = j % size; swap(array[i], array[j]); } } void fill_array( ELM *array ) { unsigned long i; my_srand(1); /* first, fill with integers 1..size */ for (i = 0; i < size; ++i) { array[i] = i; } } void sort_init_par ( void ) { /* Checking arguments */ if (size < 4) { fprintf(stdout,"%s can not be less than 4, using 4 as a parameter.\n", "Array Size" ); size = 4; } if (sequential_merge_cutoff < 2) { fprintf(stdout,"%s can not be less than 2, using 2 as a parameter.\n", "Sequential Merge cutoff value"); sequential_merge_cutoff = 2; } else if (sequential_merge_cutoff > size ) { fprintf(stdout,"%s can not be greather than vector size, using %d as a parameter.\n", "Sequential Merge cutoff value", size); sequential_merge_cutoff = size; } if (quicksort_cutoff > size ) { fprintf(stdout,"%s can not be greather than vector size, using %d as a parameter.\n", "Sequential Quicksort cutoff value", size); quicksort_cutoff = size; } if (insertion_cutoff > size ) { fprintf(stdout,"%s can not be greather than vector size, using %d as a parameter.\n", "Sequential Insertion cutoff value", size); insertion_cutoff = size; } if (insertion_cutoff > quicksort_cutoff) { fprintf(stdout,"%s can not be greather than %s, using %d as a parameter.\n", "Sequential Insertion cutoff value", "Sequential Quicksort cutoff value", quicksort_cutoff ); insertion_cutoff = 
quicksort_cutoff; } array = (ELM *) malloc(size * sizeof(ELM)); tmp = (ELM *) malloc(size * sizeof(ELM)); fill_array(array); scramble_array(array); } void sort_init_seq ( void ) { /* Checking arguments */ if (size < 4) { fprintf(stdout,"%s can not be less than 4, using 4 as a parameter.\n", "Array Size" ); size = 4; } if (sequential_merge_cutoff < 2) { fprintf(stdout,"%s can not be less than 2, using 2 as a parameter.\n", "Sequential Merge cutoff value"); sequential_merge_cutoff = 2; } else if (sequential_merge_cutoff > size ) { fprintf(stdout,"%s can not be greather than vector size, using %d as a parameter.\n", "Sequential Merge cutoff value", size); sequential_merge_cutoff = size; } if (quicksort_cutoff > size ) { fprintf(stdout,"%s can not be greather than vector size, using %d as a parameter.\n", "Sequential Quicksort cutoff value", size); quicksort_cutoff = size; } if (insertion_cutoff > size ) { fprintf(stdout,"%s can not be greather than vector size, using %d as a parameter.\n", "Sequential Insertion cutoff value", size); insertion_cutoff = size; } if (insertion_cutoff > quicksort_cutoff) { fprintf(stdout,"%s can not be greather than %s, using %d as a parameter.\n", "Sequential Insertion cutoff value", "Sequential Quicksort cutoff value", quicksort_cutoff ); insertion_cutoff = quicksort_cutoff; } seq = (ELM *) malloc(size * sizeof(ELM)); tmpseq = (ELM *) malloc(size * sizeof(ELM)); fill_array(seq); scramble_array(seq); } void sort_seq ( void ) { fprintf(stdout,"Computing multisort algorithm (n=%d) ", size); cilksort_seq(seq, tmpseq, size); fprintf(stdout," completed!\n"); } void sort_par ( void ) { fprintf(stdout,"Computing multisort algorithm (n=%d) ", size); #pragma omp parallel #pragma omp single nowait #pragma omp task untied cilksort_par(array, tmp, size); fprintf(stdout," completed!\n"); } int sort_verify ( void ) { int i, success = 1; for (i = 0; i < size; ++i) if (array[i] != i || seq[i] != i) success = 0; return success ? 
1 : 0; } void print_usage() { fprintf(stderr, "\n"); fprintf(stderr, "Usage: %s -[options]\n", "Sort"); fprintf(stderr, "\n"); fprintf(stderr, "Where options are:\n"); fprintf(stderr, " -n <size> : Array Size\n"); fprintf(stderr, " -y <value> : Sequential Merge cutoff value(default=2048)\n"); fprintf(stderr, " -a <value> : Sequential Quicksort cutoff value(default=2048)\n"); fprintf(stderr, " -b <value> : Sequential Insertion cutoff value(default=20)\n"); fprintf(stderr, " -h : Print program's usage (this help).\n"); fprintf(stdout, "\n"); } int main(int argc, char* argv[]) { int i; for (i=1; i<argc; i++) { if (argv[i][0] == '-') { switch (argv[i][1]) { case 'n': /* read argument size 0 */ argv[i][1] = '*'; i++; if (argc == i) { "Erro\n"; exit(100); } size = atoi(argv[i]); break; case 'y': /* read argument size 0 */ argv[i][1] = '*'; i++; if (argc == i) { "Erro\n"; exit(100); } sequential_merge_cutoff = atoi(argv[i]); break; case 'a': /* read argument size 0 */ argv[i][1] = '*'; i++; if (argc == i) { "Erro\n"; exit(100); } quicksort_cutoff = atoi(argv[i]); break; case 'b': /* read argument size 0 */ argv[i][1] = '*'; i++; if (argc == i) { "Erro\n"; exit(100); } insertion_cutoff = atoi(argv[i]); break; case 'h': /* print usage */ argv[i][1] = '*'; print_usage(); exit (100); break; } } } sort_init_par(); double t_start, t_end; t_start = rtclock(); sort_par(); t_end = rtclock(); fprintf(stdout, "Parallel Runtime: %0.6lfs\n", t_end - t_start); sort_init_seq(); t_start = rtclock(); sort_seq(); t_end = rtclock(); fprintf(stdout, "Sequential Runtime: %0.6lfs\n", t_end - t_start); if (sort_verify()) { fprintf(stdout, "Result: Successful\n"); } else { fprintf(stdout, "Result: Unsuccessful\n"); } }
/* ========================== cblasfuncs.c ========================== */
/*
 * This module provides a BLAS optimized matrix multiply,
 * inner product and dot for numpy arrays
 */

#define NPY_NO_DEPRECATED_API NPY_API_VERSION

#include <Python.h>
#include <assert.h>

#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#define NO_IMPORT_ARRAY
#include <numpy/arrayobject.h>

/* the CBLAS prototypes must be visible inside offload regions */
#pragma omp declare target
#include "npy_cblas.h"
#pragma omp end declare target

#define _MICARRAYMODULE
#include "mpyndarraytypes.h"
#include "arraytypes.h"
#include "common.h"
#include "mpymem_overlap.h"
#include "convert.h"
#include "creators.h"
#include "scalar.h"

/*
 * These might be faster without the dereferencing of obj
 * going on inside -- of course an optimizing compiler should
 * inline the constants inside a for loop making it a moot point
 */
/* Strided element access given a raw data pointer and a strides vector. */
#define Array_GETPTR1(data, strides, i) ((void *)((char *) data + \
                                         (i)*strides[0]))
#define Array_GETPTR2(data, strides, i, j) ((void *)((char *) data + \
                                            (i)*strides[0] + \
                                            (j)*strides[1]))
#define Array_GETPTR3(data, strides, i, j, k) ((void *)((char *) data + \
                                               (i)*strides[0] + \
                                               (j)*strides[1] + \
                                               (k)*strides[2]))
#define Array_GETPTR4(data, strides, i, j, k, l) ((void *)((char *) data + \
                                                  (i)*strides[0] + \
                                                  (j)*strides[1] + \
                                                  (k)*strides[2] + \
                                                  (l)*strides[3]))

/*
 * Helper: call appropriate BLAS dot function for typenum.
 * Strides are NumPy strides.
 */
/* Only the four BLAS floating types are handled; other typenums are a
 * silent no-op (callers are expected to have pre-checked typenum). */
static void
blas_dot(int device, int typenum, npy_intp n,
         void *a, npy_intp stridea, void *b, npy_intp strideb, void *res)
{
    switch (typenum) {
        case NPY_DOUBLE:
            DOUBLE_dot(a, stridea, b, strideb, res, n, device);
            break;
        case NPY_FLOAT:
            FLOAT_dot(a, stridea, b, strideb, res, n, device);
            break;
        case NPY_CDOUBLE:
            CDOUBLE_dot(a, stridea, b, strideb, res, n, device);
            break;
        case NPY_CFLOAT:
            CFLOAT_dot(a, stridea, b, strideb, res, n, device);
            break;
    }
}

/* Complex alpha/beta constants (1+0i, 0+0i) for z/c gemm/gemv calls;
 * declared on the target so offloaded regions can reference them. */
#pragma omp declare target
static const double oneD[2] = {1.0, 0.0}, zeroD[2] = {0.0, 0.0};
static const float oneF[2] = {1.0, 0.0}, zeroF[2] = {0.0, 0.0};
#pragma omp end declare target

/*
 * Helper: dispatch to appropriate cblas_?gemm for typenum.
 */
/* NOTE(review): map(to: Adata, ...) transfers only the pointer values to the
 * device; the buffers themselves are presumably already device-resident via
 * micpy's allocator -- confirm against PyMicArray_DATA semantics. */
static void
gemm(int typenum, enum CBLAS_ORDER order,
     enum CBLAS_TRANSPOSE transA, enum CBLAS_TRANSPOSE transB,
     int m, int n, int k,
     PyMicArrayObject *A, int lda, PyMicArrayObject *B, int ldb,
     PyMicArrayObject *R)
{
    const void *Adata = PyMicArray_DATA(A), *Bdata = PyMicArray_DATA(B);
    void *Rdata = PyMicArray_DATA(R);
    /* leading dimension of R; at least 1 even for degenerate shapes */
    int ldc = PyMicArray_DIM(R, 1) > 1 ? PyMicArray_DIM(R, 1) : 1;
    int device = PyMicArray_DEVICE(A);

#pragma omp target device(device) map(to: typenum, order, transA, transB, m, n, k, \
                                      Adata, lda, Bdata, ldb, Rdata, ldc)
    switch (typenum) {
        case NPY_DOUBLE:
            cblas_dgemm(order, transA, transB, m, n, k, 1., Adata, lda,
                        Bdata, ldb, 0., Rdata, ldc);
            break;
        case NPY_FLOAT:
            cblas_sgemm(order, transA, transB, m, n, k, 1.f, Adata, lda,
                        Bdata, ldb, 0.f, Rdata, ldc);
            break;
        case NPY_CDOUBLE:
            cblas_zgemm(order, transA, transB, m, n, k, oneD, Adata, lda,
                        Bdata, ldb, zeroD, Rdata, ldc);
            break;
        case NPY_CFLOAT:
            cblas_cgemm(order, transA, transB, m, n, k, oneF, Adata, lda,
                        Bdata, ldb, zeroF, Rdata, ldc);
            break;
    }
}

/*
 * Helper: dispatch to appropriate cblas_?gemv for typenum.
 */
/* Computes R = A @ X (matrix-vector) on the given device; result stride
 * is fixed at 1. */
static void
gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
     PyMicArrayObject *A, int lda, PyMicArrayObject *X, int incX,
     PyMicArrayObject *R)
{
    const void *Adata = PyMicArray_DATA(A), *Xdata = PyMicArray_DATA(X);
    void *Rdata = PyMicArray_DATA(R);
    int m = PyMicArray_DIM(A, 0), n = PyMicArray_DIM(A, 1);
    int device = PyMicArray_DEVICE(A);

#pragma omp target device(device) map(to: typenum, order, trans, m, n, \
                                      Adata, lda, Xdata, incX, Rdata)
    switch (typenum) {
        case NPY_DOUBLE:
            cblas_dgemv(order, trans, m, n, 1., Adata, lda, Xdata, incX,
                        0., Rdata, 1);
            break;
        case NPY_FLOAT:
            cblas_sgemv(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
                        0.f, Rdata, 1);
            break;
        case NPY_CDOUBLE:
            cblas_zgemv(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
                        zeroD, Rdata, 1);
            break;
        case NPY_CFLOAT:
            cblas_cgemv(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
                        zeroF, Rdata, 1);
            break;
    }
}

/*
 * Helper: dispatch to appropriate cblas_?syrk for typenum.
 */
/* syrk writes only the upper triangle (CblasUpper); the loops after each
 * call mirror it into the lower triangle so R is a full symmetric matrix.
 * The loops execute inside the same target region as the BLAS call. */
static void
syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
     int n, int k,
     PyMicArrayObject *A, int lda, PyMicArrayObject *R)
{
    const void *Adata = PyMicArray_DATA(A);
    void *Rdata = PyMicArray_DATA(R);
    npy_intp *Rstrides = PyMicArray_STRIDES(R);
    int ldc = PyMicArray_DIM(R, 1) > 1 ? PyMicArray_DIM(R, 1) : 1;
    int device = PyMicArray_DEVICE(A);
    npy_intp i;
    npy_intp j;

#pragma omp target device(device) map(to: typenum, order, trans, n, k, \
                                      Adata, lda, ldc, Rdata, Rstrides[0:2])
    switch (typenum) {
        case NPY_DOUBLE:
            cblas_dsyrk(order, CblasUpper, trans, n, k, 1., Adata, lda,
                        0., Rdata, ldc);
            for (i = 0; i < n; i++) {
                for (j = i + 1; j < n; j++) {
                    *((npy_double*)Array_GETPTR2(Rdata, Rstrides, j, i)) =
                        *((npy_double*)Array_GETPTR2(Rdata, Rstrides, i, j));
                }
            }
            break;
        case NPY_FLOAT:
            cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f, Adata, lda,
                        0.f, Rdata, ldc);
            for (i = 0; i < n; i++) {
                for (j = i + 1; j < n; j++) {
                    *((npy_float*)Array_GETPTR2(Rdata, Rstrides, j, i)) =
                        *((npy_float*)Array_GETPTR2(Rdata, Rstrides, i, j));
                }
            }
            break;
        case NPY_CDOUBLE:
            cblas_zsyrk(order, CblasUpper, trans, n, k, oneD, Adata, lda,
                        zeroD, Rdata, ldc);
            for (i = 0; i < n; i++) {
                for (j = i + 1; j < n; j++) {
                    *((npy_cdouble*)Array_GETPTR2(Rdata, Rstrides, j, i)) =
                        *((npy_cdouble*)Array_GETPTR2(Rdata, Rstrides, i, j));
                }
            }
            break;
        case NPY_CFLOAT:
            cblas_csyrk(order, CblasUpper, trans, n, k, oneF, Adata, lda,
                        zeroF, Rdata, ldc);
            for (i = 0; i < n; i++) {
                for (j = i + 1; j < n; j++) {
                    *((npy_cfloat*)Array_GETPTR2(Rdata, Rstrides, j, i)) =
                        *((npy_cfloat*)Array_GETPTR2(Rdata, Rstrides, i, j));
                }
            }
            break;
    }
}

/* Shape classification used to pick scalar / gemv / gemm code paths. */
typedef enum {_scalar, _column, _row, _matrix} MatrixShape;

/* Classify a 0-, 1-, or 2-d array; any dimension of extent 1 collapses
 * toward _scalar / _row / _column as appropriate. */
static MatrixShape
_select_matrix_shape(PyMicArrayObject *array)
{
    switch (PyMicArray_NDIM(array)) {
        case 0:
            return _scalar;
        case 1:
            if (PyMicArray_DIM(array, 0) > 1)
                return _column;
            return _scalar;
        case 2:
            if (PyMicArray_DIM(array, 0) > 1) {
                if (PyMicArray_DIM(array, 1) == 1)
                    return _column;
                else
                    return _matrix;
            }
            if (PyMicArray_DIM(array, 1) == 1)
                return _scalar;
            return _row;
    }
    return _matrix;
}


/*
 * This also makes sure that the data segment is aligned with
 * an itemsize address as well by returning one if not true.
*/ static int _bad_strides(PyMicArrayObject *ap) { int itemsize = PyMicArray_ITEMSIZE(ap); int i, N=PyMicArray_NDIM(ap); npy_intp *strides = PyMicArray_STRIDES(ap); if (((npy_intp)(PyMicArray_DATA(ap)) % itemsize) != 0) { return 1; } for (i = 0; i < N; i++) { if ((strides[i] < 0) || (strides[i] % itemsize) != 0) { return 1; } } return 0; } /* * dot(a,b) * Returns the dot product of a and b for arrays of floating point types. * Like the generic numpy equivalent the product sum is over * the last dimension of a and the second-to-last dimension of b. * NB: The first argument is not conjugated.; * * This is for use by PyArray_MatrixProduct2. It is assumed on entry that * the arrays ap1 and ap2 have a common data type given by typenum that is * float, double, cfloat, or cdouble and have dimension <= 2. The * __array_ufunc__ nonsense is also assumed to have been taken care of. */ NPY_NO_EXPORT PyObject * cblas_matrixproduct(int typenum, PyMicArrayObject *ap1, PyMicArrayObject *ap2, PyMicArrayObject *out) { PyMicArrayObject *result = NULL, *out_buf = NULL; int j, lda, ldb; npy_intp l; int nd; npy_intp ap1stride = 0; npy_intp dimensions[NPY_MAXDIMS]; npy_intp numbytes; double prior1, prior2; PyTypeObject *subtype; MatrixShape ap1shape, ap2shape; void *tmpdata; int device = PyMicArray_DEVICE(ap1); // Assume on the same device if (_bad_strides(ap1)) { PyObject *op1 = PyMicArray_NewCopy(ap1, NPY_ANYORDER); Py_DECREF(ap1); ap1 = (PyMicArrayObject *)op1; if (ap1 == NULL) { goto fail; } } if (_bad_strides(ap2)) { PyObject *op2 = PyMicArray_NewCopy(ap2, NPY_ANYORDER); Py_DECREF(ap2); ap2 = (PyMicArrayObject *)op2; if (ap2 == NULL) { goto fail; } } ap1shape = _select_matrix_shape(ap1); ap2shape = _select_matrix_shape(ap2); if (ap1shape == _scalar || ap2shape == _scalar) { PyMicArrayObject *oap1, *oap2; oap1 = ap1; oap2 = ap2; /* One of ap1 or ap2 is a scalar */ if (ap1shape == _scalar) { /* Make ap2 the scalar */ PyMicArrayObject *t = ap1; ap1 = ap2; ap2 = t; ap1shape = ap2shape; 
ap2shape = _scalar; } if (ap1shape == _row) { ap1stride = PyMicArray_STRIDE(ap1, 1); } else if (PyMicArray_NDIM(ap1) > 0) { ap1stride = PyMicArray_STRIDE(ap1, 0); } if (PyMicArray_NDIM(ap1) == 0 || PyMicArray_NDIM(ap2) == 0) { npy_intp *thisdims; if (PyMicArray_NDIM(ap1) == 0) { nd = PyMicArray_NDIM(ap2); thisdims = PyMicArray_DIMS(ap2); } else { nd = PyMicArray_NDIM(ap1); thisdims = PyMicArray_DIMS(ap1); } l = 1; for (j = 0; j < nd; j++) { dimensions[j] = thisdims[j]; l *= dimensions[j]; } } else { l = PyMicArray_DIM(oap1, PyMicArray_NDIM(oap1) - 1); if (PyMicArray_DIM(oap2, 0) != l) { dot_alignment_error(oap1, PyMicArray_NDIM(oap1) - 1, oap2, 0); goto fail; } nd = PyMicArray_NDIM(ap1) + PyMicArray_NDIM(ap2) - 2; /* * nd = 0 or 1 or 2. If nd == 0 do nothing ... */ if (nd == 1) { /* * Either PyArray_NDIM(ap1) is 1 dim or PyArray_NDIM(ap2) is * 1 dim and the other is 2 dim */ dimensions[0] = (PyMicArray_NDIM(oap1) == 2) ? PyMicArray_DIM(oap1, 0) : PyMicArray_DIM(oap2, 1); l = dimensions[0]; /* * Fix it so that dot(shape=(N,1), shape=(1,)) * and dot(shape=(1,), shape=(1,N)) both return * an (N,) array (but use the fast scalar code) */ } else if (nd == 2) { dimensions[0] = PyMicArray_DIM(oap1, 0); dimensions[1] = PyMicArray_DIM(oap2, 1); /* * We need to make sure that dot(shape=(1,1), shape=(1,N)) * and dot(shape=(N,1),shape=(1,1)) uses * scalar multiplication appropriately */ if (ap1shape == _row) { l = dimensions[1]; } else { l = dimensions[0]; } } /* Check if the summation dimension is 0-sized */ if (PyMicArray_DIM(oap1, PyMicArray_NDIM(oap1) - 1) == 0) { l = 0; } } } else { /* * (PyArray_NDIM(ap1) <= 2 && PyArray_NDIM(ap2) <= 2) * Both ap1 and ap2 are vectors or matrices */ l = PyMicArray_DIM(ap1, PyMicArray_NDIM(ap1) - 1); if (PyMicArray_DIM(ap2, 0) != l) { dot_alignment_error(ap1, PyMicArray_NDIM(ap1) - 1, ap2, 0); goto fail; } nd = PyMicArray_NDIM(ap1) + PyMicArray_NDIM(ap2) - 2; if (nd == 1) { dimensions[0] = (PyMicArray_NDIM(ap1) == 2) ? 
PyMicArray_DIM(ap1, 0) : PyMicArray_DIM(ap2, 1); } else if (nd == 2) { dimensions[0] = PyMicArray_DIM(ap1, 0); dimensions[1] = PyMicArray_DIM(ap2, 1); } } /* Choose which subtype to return */ if (Py_TYPE(ap1) != Py_TYPE(ap2)) { prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); } else { prior1 = prior2 = 0.0; subtype = Py_TYPE(ap1); } if (out != NULL) { int d; /* verify that out is usable */ if (Py_TYPE(out) != subtype || PyMicArray_NDIM(out) != nd || PyMicArray_TYPE(out) != typenum || !PyMicArray_ISCARRAY(out)) { PyErr_SetString(PyExc_ValueError, "output array is not acceptable " "(must have the right type, nr dimensions, and be a C-Array)"); goto fail; } for (d = 0; d < nd; ++d) { if (dimensions[d] != PyMicArray_DIM(out, d)) { PyErr_SetString(PyExc_ValueError, "output array has wrong dimensions"); goto fail; } } /* check for memory overlap */ if (!(solve_may_share_memory(out, ap1, 1) == 0 && solve_may_share_memory(out, ap2, 1) == 0)) { /* allocate temporary output array */ out_buf = (PyMicArrayObject *)PyMicArray_NewLikeArray(device, (PyArrayObject *)out, NPY_CORDER, NULL, 0); if (out_buf == NULL) { goto fail; } /* set copy-back */ /* TODO: check whether SetUpdateIfCopyBase work normally */ Py_INCREF(out); if (PyMicArray_SetUpdateIfCopyBase(out_buf, out) < 0) { Py_DECREF(out); goto fail; } } else { Py_INCREF(out); out_buf = out; } Py_INCREF(out); result = out; } else { PyObject *tmp = (PyObject *)(prior2 > prior1 ? 
ap2 : ap1); out_buf = (PyMicArrayObject *)PyMicArray_New(device, subtype, nd, dimensions, typenum, NULL, NULL, 0, 0, tmp); if (out_buf == NULL) { goto fail; } Py_INCREF(out_buf); result = out_buf; } numbytes = PyMicArray_NBYTES(out_buf); target_memset(PyMicArray_DATA(out_buf), 0, numbytes, device); if (numbytes == 0 || l == 0) { Py_DECREF(ap1); Py_DECREF(ap2); return PyMicArray_Return(out_buf); } /* Prepare for offloading */ void *ap1data = PyMicArray_DATA(ap1); void *ap2data = PyMicArray_DATA(ap2); void *outdata = PyMicArray_DATA(out_buf); int ap1ndim = PyMicArray_NDIM(ap1); int ap2ndim = PyMicArray_NDIM(ap2); int outndim = PyMicArray_NDIM(out_buf); npy_intp *ap1dims = PyMicArray_DIMS(ap1); npy_intp *ap2dims = PyMicArray_DIMS(ap2); npy_intp *outdims = PyMicArray_DIMS(out_buf); npy_intp *ap1strides_ptr = PyMicArray_STRIDES(ap1); npy_intp *ap2strides_ptr = PyMicArray_STRIDES(ap2); npy_intp *outstrides_ptr = PyMicArray_STRIDES(out_buf); if (ap2shape == _scalar) { /* * Multiplication by a scalar -- Level 1 BLAS * if ap1shape is a matrix and we are not contiguous, then we can't * just blast through the entire array using a single striding factor */ NPY_BEGIN_ALLOW_THREADS; if (typenum == NPY_DOUBLE) { if (l == 1) { #pragma omp target device(device) map(to: outdata, ap1data, ap2data) *((double *)outdata) = *((double *)ap2data) * *((double *)ap1data); } else if (ap1shape != _matrix) { #pragma omp target device(device) map(to: l, ap2data, ap1data, \ ap1stride, outdata) cblas_daxpy(l, *((double *)ap2data), (double *)ap1data, ap1stride/sizeof(double), (double *)outdata, 1); } else { int maxind, oind, i, a1s, outs; char *ptr, *optr; //double val; npy_intp niter, incptr, incoptr; maxind = (PyMicArray_DIM(ap1, 0) >= PyMicArray_DIM(ap1, 1) ? 
0 : 1); oind = 1 - maxind; ptr = PyMicArray_DATA(ap1); optr = PyMicArray_DATA(out_buf); l = PyMicArray_DIM(ap1, maxind); //val = *((double *)PyMicArray_DATA(ap2)); a1s = PyMicArray_STRIDE(ap1, maxind) / sizeof(double); outs = PyMicArray_STRIDE(out_buf, maxind) / sizeof(double); niter = PyMicArray_DIM(ap1, oind); incptr = PyMicArray_STRIDE(ap1, oind); incoptr = PyMicArray_STRIDE(out_buf, oind); #pragma omp target device(device) \ map(to: l, ptr, a1s, optr, outs, ap2data,\ niter, incptr, incoptr) for (i = 0; i < niter; i++) { cblas_daxpy(l, *((double *)ap2data), (double *)ptr, a1s, (double *)optr, outs); ptr += incptr; optr += incoptr; } } } else if (typenum == NPY_CDOUBLE) { if (l == 1) { npy_cdouble *ptr1, *ptr2, *res; #pragma omp target device(device) map(to: outdata, ap1data, ap2data) { ptr1 = (npy_cdouble *)ap2data; ptr2 = (npy_cdouble *)ap1data; res = (npy_cdouble *)outdata; res->real = ptr1->real * ptr2->real - ptr1->imag * ptr2->imag; res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; } } else if (ap1shape != _matrix) { #pragma omp target device(device) map(to: l, ap1stride, \ outdata, ap1data, ap2data) cblas_zaxpy(l, (double *)ap2data, (double *)ap1data, ap1stride/sizeof(npy_cdouble), (double *)outdata, 1); } else { int maxind, oind, i, a1s, outs; char *ptr, *optr; double *pval; npy_intp niter, incptr, incoptr; maxind = (PyMicArray_DIM(ap1, 0) >= PyMicArray_DIM(ap1, 1) ? 
0 : 1); oind = 1 - maxind; ptr = PyMicArray_DATA(ap1); optr = PyMicArray_DATA(out_buf); l = PyMicArray_DIM(ap1, maxind); pval = (double *)PyMicArray_DATA(ap2); a1s = PyMicArray_STRIDE(ap1, maxind) / sizeof(npy_cdouble); outs = PyMicArray_STRIDE(out_buf, maxind) / sizeof(npy_cdouble); niter = PyMicArray_DIM(ap1, oind); incptr = PyMicArray_STRIDE(ap1, oind); incoptr = PyMicArray_STRIDE(out_buf, oind); #pragma omp target device(device) map(to: l, pval, ptr, a1s, \ optr, outs, \ niter, incptr, incoptr) for (i = 0; i < niter; i++) { cblas_zaxpy(l, pval, (double *)ptr, a1s, (double *)optr, outs); ptr += incptr; optr += incoptr; } } } else if (typenum == NPY_FLOAT) { if (l == 1) { #pragma omp target device(device) map(to: outdata, ap1data, ap2data) *((float *)outdata) = *((float *)ap2data) * *((float *)ap1data); } else if (ap1shape != _matrix) { #pragma omp target device(device) map(to: l, ap1stride, \ outdata, ap1data, ap2data) cblas_saxpy(l, *((float *)ap2data), (float *)ap1data, ap1stride/sizeof(float), (float *)outdata, 1); } else { int maxind, oind, i, a1s, outs; char *ptr, *optr; //float val; npy_intp niter, incptr, incoptr; maxind = (PyMicArray_DIM(ap1, 0) >= PyMicArray_DIM(ap1, 1) ? 
0 : 1); oind = 1 - maxind; ptr = PyMicArray_DATA(ap1); optr = PyMicArray_DATA(out_buf); l = PyMicArray_DIM(ap1, maxind); //val = *((float *)PyMicArray_DATA(ap2)); a1s = PyMicArray_STRIDE(ap1, maxind) / sizeof(float); outs = PyMicArray_STRIDE(out_buf, maxind) / sizeof(float); niter = PyMicArray_DIM(ap1, oind); incptr = PyMicArray_STRIDE(ap1, oind); incoptr = PyMicArray_STRIDE(out_buf, oind); #pragma omp target device(device) map(to: l, ptr, a1s, \ optr, outs, ap2data, \ niter, incptr, incoptr) for (i = 0; i < niter; i++) { cblas_saxpy(l, *((float *)ap2data), (float *)ptr, a1s, (float *)optr, outs); ptr += incptr; optr += incoptr; } } } else if (typenum == NPY_CFLOAT) { if (l == 1) { npy_cfloat *ptr1, *ptr2, *res; #pragma omp target device(device) map(to: outdata, ap1data, ap2data) { ptr1 = (npy_cfloat *)PyMicArray_DATA(ap2); ptr2 = (npy_cfloat *)PyMicArray_DATA(ap1); res = (npy_cfloat *)PyMicArray_DATA(out_buf); res->real = ptr1->real * ptr2->real - ptr1->imag * ptr2->imag; res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real; } } else if (ap1shape != _matrix) { #pragma omp target device(device) map(to: l, ap1stride, \ outdata, ap1data, ap2data) cblas_caxpy(l, (float *)ap2data, (float *)ap1data, ap1stride/sizeof(npy_cfloat), (float *)outdata, 1); } else { int maxind, oind, i, a1s, outs; char *ptr, *optr; float *pval; npy_intp niter, incptr, incoptr; maxind = (PyMicArray_DIM(ap1, 0) >= PyMicArray_DIM(ap1, 1) ? 
0 : 1); oind = 1 - maxind; ptr = PyMicArray_DATA(ap1); optr = PyMicArray_DATA(out_buf); l = PyMicArray_DIM(ap1, maxind); pval = (float *)PyMicArray_DATA(ap2); a1s = PyMicArray_STRIDE(ap1, maxind) / sizeof(npy_cfloat); outs = PyMicArray_STRIDE(out_buf, maxind) / sizeof(npy_cfloat); niter = PyMicArray_DIM(ap1, oind); incptr = PyMicArray_STRIDE(ap1, oind); incoptr = PyMicArray_STRIDE(out_buf, oind); #pragma omp target device(device) map(to: l, ptr, a1s, \ optr, outs, pval, \ niter, incptr, incoptr) for (i = 0; i < niter; i++) { cblas_caxpy(l, pval, (float *)ptr, a1s, (float *)optr, outs); ptr += incptr; optr += incoptr; } } } /*End offload section */ NPY_END_ALLOW_THREADS; } else if ((ap2shape == _column) && (ap1shape != _matrix)) { NPY_BEGIN_ALLOW_THREADS; /* Dot product between two vectors -- Level 1 BLAS */ blas_dot(device, typenum, l, PyMicArray_DATA(ap1), PyMicArray_STRIDE(ap1, (ap1shape == _row)), PyMicArray_DATA(ap2), PyMicArray_STRIDE(ap2, 0), PyMicArray_DATA(out_buf)); NPY_END_ALLOW_THREADS; } else if (ap1shape == _matrix && ap2shape != _matrix) { /* Matrix vector multiplication -- Level 2 BLAS */ /* lda must be MAX(M,1) */ enum CBLAS_ORDER Order; int ap2s; if (!PyMicArray_ISONESEGMENT(ap1)) { PyObject *new; new = PyMicArray_Copy(ap1); Py_DECREF(ap1); ap1 = (PyMicArrayObject *)new; if (new == NULL) { goto fail; } } NPY_BEGIN_ALLOW_THREADS if (PyMicArray_ISCONTIGUOUS(ap1)) { Order = CblasRowMajor; lda = (PyMicArray_DIM(ap1, 1) > 1 ? PyMicArray_DIM(ap1, 1) : 1); } else { Order = CblasColMajor; lda = (PyMicArray_DIM(ap1, 0) > 1 ? 
PyMicArray_DIM(ap1, 0) : 1); } ap2s = PyMicArray_STRIDE(ap2, 0) / PyMicArray_ITEMSIZE(ap2); gemv(typenum, Order, CblasNoTrans, ap1, lda, ap2, ap2s, out_buf); NPY_END_ALLOW_THREADS; } else if (ap1shape != _matrix && ap2shape == _matrix) { /* Vector matrix multiplication -- Level 2 BLAS */ enum CBLAS_ORDER Order; int ap1s; if (!PyMicArray_ISONESEGMENT(ap2)) { PyObject *new; new = PyMicArray_Copy(ap2); Py_DECREF(ap2); ap2 = (PyMicArrayObject *)new; if (new == NULL) { goto fail; } } NPY_BEGIN_ALLOW_THREADS if (PyMicArray_ISCONTIGUOUS(ap2)) { Order = CblasRowMajor; lda = (PyMicArray_DIM(ap2, 1) > 1 ? PyMicArray_DIM(ap2, 1) : 1); } else { Order = CblasColMajor; lda = (PyMicArray_DIM(ap2, 0) > 1 ? PyMicArray_DIM(ap2, 0) : 1); } if (ap1shape == _row) { ap1s = PyMicArray_STRIDE(ap1, 1) / PyMicArray_ITEMSIZE(ap1); } else { ap1s = PyMicArray_STRIDE(ap1, 0) / PyMicArray_ITEMSIZE(ap1); } gemv(typenum, Order, CblasTrans, ap2, lda, ap1, ap1s, out_buf); NPY_END_ALLOW_THREADS; } else { /* * (PyArray_NDIM(ap1) == 2 && PyArray_NDIM(ap2) == 2) * Matrix matrix multiplication -- Level 3 BLAS * L x M multiplied by M x N */ enum CBLAS_ORDER Order; enum CBLAS_TRANSPOSE Trans1, Trans2; int M, N, L; /* Optimization possible: */ /* * We may be able to handle single-segment arrays here * using appropriate values of Order, Trans1, and Trans2. */ if (!PyMicArray_IS_C_CONTIGUOUS(ap2) && !PyMicArray_IS_F_CONTIGUOUS(ap2)) { PyObject *new = PyMicArray_Copy(ap2); Py_DECREF(ap2); ap2 = (PyMicArrayObject *)new; if (new == NULL) { goto fail; } } if (!PyMicArray_IS_C_CONTIGUOUS(ap1) && !PyMicArray_IS_F_CONTIGUOUS(ap1)) { PyObject *new = PyMicArray_Copy(ap1); Py_DECREF(ap1); ap1 = (PyMicArrayObject *)new; if (new == NULL) { goto fail; } } NPY_BEGIN_ALLOW_THREADS; Order = CblasRowMajor; Trans1 = CblasNoTrans; Trans2 = CblasNoTrans; L = PyMicArray_DIM(ap1, 0); N = PyMicArray_DIM(ap2, 1); M = PyMicArray_DIM(ap2, 0); lda = (PyMicArray_DIM(ap1, 1) > 1 ? 
PyMicArray_DIM(ap1, 1) : 1); ldb = (PyMicArray_DIM(ap2, 1) > 1 ? PyMicArray_DIM(ap2, 1) : 1); /* * Avoid temporary copies for arrays in Fortran order */ if (PyMicArray_IS_F_CONTIGUOUS(ap1)) { Trans1 = CblasTrans; lda = (PyMicArray_DIM(ap1, 0) > 1 ? PyMicArray_DIM(ap1, 0) : 1); } if (PyMicArray_IS_F_CONTIGUOUS(ap2)) { Trans2 = CblasTrans; ldb = (PyMicArray_DIM(ap2, 0) > 1 ? PyMicArray_DIM(ap2, 0) : 1); } /* * Use syrk if we have a case of a matrix times its transpose. * Otherwise, use gemm for all other cases. */ if ( (PyMicArray_BYTES(ap1) == PyMicArray_BYTES(ap2)) && (PyMicArray_DIM(ap1, 0) == PyMicArray_DIM(ap2, 1)) && (PyMicArray_DIM(ap1, 1) == PyMicArray_DIM(ap2, 0)) && (PyMicArray_STRIDE(ap1, 0) == PyMicArray_STRIDE(ap2, 1)) && (PyMicArray_STRIDE(ap1, 1) == PyMicArray_STRIDE(ap2, 0)) && ((Trans1 == CblasTrans) ^ (Trans2 == CblasTrans)) && ((Trans1 == CblasNoTrans) ^ (Trans2 == CblasNoTrans)) ) { if (Trans1 == CblasNoTrans) { syrk(typenum, Order, Trans1, N, M, ap1, lda, out_buf); } else { syrk(typenum, Order, Trans1, N, M, ap2, ldb, out_buf); } } else { gemm(typenum, Order, Trans1, Trans2, L, N, M, ap1, lda, ap2, ldb, out_buf); } NPY_END_ALLOW_THREADS; } Py_DECREF(ap1); Py_DECREF(ap2); /* Trigger possible copyback into `result` */ Py_DECREF(out_buf); return PyMicArray_Return(result); fail: Py_XDECREF(ap1); Py_XDECREF(ap2); Py_XDECREF(out_buf); Py_XDECREF(result); return NULL; }
EDT.h
#ifndef EDT_INCLUDED
#define EDT_INCLUDED
#include <omp.h>
#include "SignalProcessing/CubeGrid.h"

// Squared Euclidean distance transform (EDT) of a binary voxel rasterization,
// computed as three separable 1-D passes (z, then y, then x), each pass
// parallelized over threads with OpenMP.
// "rasterization" is a res^3 grid of 0/1 chars; "edt" receives, per voxel,
// the squared distance (in voxel units) to the nearest non-zero voxel.
template< class Real >
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& edt , int threads=1 );
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< int >& edt , int threads=1 );
// Gaussian of the squared EDT: gedt = exp( -d^2 / (2*fallOff^2) ).
template< class Real >
void GaussianEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt , Real fallOff=Real(sqrt(8.)) , int threads=1 );

///////////////////////////////
// Rasterization definitions //
///////////////////////////////
// Real-valued wrapper: compute the integer squared EDT, then cast per voxel.
template< class Real >
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& edt , int threads )
{
	int res = rasterization.resolution();
	CubeGrid< int > _edt;
	SquaredEDT( rasterization , _edt , threads );
	edt.resize( res );
	const int* _edtPtr = _edt[0];
	Real *edtPtr = edt[0];
#pragma omp parallel for num_threads( threads )
	for( int i=0 ; i<res*res*res ; i++ ) edtPtr[i] = Real( _edtPtr[i] );
}
// Gaussian-weighted EDT: gedt[i] = exp( -d2[i] / (2*fallOff^2) ).
template< class Real >
void GaussianEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt , Real fallOff , int threads )
{
	int res = rasterization.resolution();
	SquaredEDT( rasterization , gedt , threads );
	Real* _gedt = gedt[0];
	// Fold the 1/(2*sigma^2) factor into fallOff once, before the loop.
	fallOff = Real(2.)*fallOff*fallOff;
#pragma omp parallel for num_threads( threads )
	for( int i=0 ; i<res*res*res; i++ ) _gedt[i] = Real( exp( - _gedt[i] / fallOff) );
}
// Integer squared EDT, separable-scan style (forward+backward sweep per axis).
// NOTE(review): presumably a Saito–Toriwaki-type transform — the y/x passes
// minimize oldBuffer[t] + (axis distance)^2 over candidate positions t,
// tracking the last minimizer s to prune the inner search.  TODO confirm
// against the original reference implementation.
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< int >& edt , int threads )
{
	threads = std::max< int >( threads , 1 );
	int res = rasterization.resolution();
	if( !res )
	{
		fprintf( stderr , "[WARNING] Cannot compute distance transform of zero resolution rasterization\n" );
		return;
	}
	edt.resize( rasterization.resolution() );
	// Per-thread scratch rows of length res, reused by the y- and x-passes.
	std::vector< int* > oldBuffer( threads ) , newBuffer( threads );
	for( int i=0 ; i<threads ; i++ ) oldBuffer[i] = new int[res] , newBuffer[i] = new int[res];
	// Set the upper bound on the distance values
	// (3*(res+1)^2 exceeds any achievable squared distance inside the cube).
	{
		int* edtPtr = edt[0];
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<res*res*res ; i++ ) edtPtr[i] = 3 * (res+1) * (res+1);
	}
	// scan along z axis: 1-D squared distance along each (x,y) column,
	// forward then backward, keeping the smaller of the two.
#pragma omp parallel for num_threads( threads )
	for( int xy=0 ; xy<res*res ; xy++ )
	{
		int x = xy/res , y = xy%res;
		bool first=true;	// true until the first occupied voxel is seen
		int dist = 0;
		int* edtPtr = edt[x] + y*res;
		const char* rasterizationPtr = rasterization[x] + y*res;
		for( int z=0 ; z<res ; z++ )
		{
			if( rasterizationPtr[z] )
			{
				dist = 0;
				first = false;
				edtPtr[z] = 0;
			}
			else if( !first )
			{
				dist++;
				edtPtr[z] = dist*dist;
			}
		}
		// backward scan
		dist = 0;
		first = true;
		for( int z=(res-1) ; z>=0 ; z-- )
		{
			if( rasterizationPtr[z] )
			{
				dist = 0;
				first = false;
				edtPtr[z] = 0;
			}
			else if( !first )
			{
				dist++;
				int square = dist*dist;
				if( square<edtPtr[z] ) edtPtr[z] = square;
			}
		}
	}
	// scan along y axis: combine the z-pass result with squared y-offsets.
	// Work is split by thread over (x,z) columns; each thread uses its own
	// old/new scratch rows.
#pragma omp parallel for num_threads( threads )
	for( int thread=0 ; thread<threads ; thread++ )
	{
		int *_oldBuffer=oldBuffer[thread] , *_newBuffer=newBuffer[thread];
		for( int xz=(res*res*thread)/threads ; xz<(res*res*(thread+1))/threads ; xz++ )
		{
			int x = xz/res , z = xz%res;
			// forward scan
			int s=0;	// index of the last minimizer found (search pruning)
			int* edtPtr = edt[x] + z;
			for( int y=0 ; y<res; y++ )
			{
				_oldBuffer[y] = edtPtr[y*res];
				int dist = _oldBuffer[y];
				bool foundCloser=false;
				if( dist )
				{
					// Try routing through each candidate t in [s,y]:
					// squared distance = orthogonal part + (y-t)^2 along y.
					for( int t=s ; t<=y ; t++ )
					{
						int new_dist = _oldBuffer[t] + (y - t) * (y - t);
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
				}
				if( !foundCloser ) s=y;
				_newBuffer[y] = dist;
			}
			// backward scan (candidates strictly above y)
			s=res-1;
			for( int y=res-1 ; y>=0 ; y-- )
			{
				int dist = _newBuffer[y];
				bool foundCloser = false;
				if( dist )
				{
					for( int t=s ; t>y ; t-- )
					{
						int new_dist = _oldBuffer[t] + (y - t) * (y - t);
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
					// dist==0 is only possible when the stored value was
					// already 0, so the write may be skipped in that case.
					edtPtr[y*res] = dist;
				}
				if( !foundCloser ) s=y;
			}
		}
	}
	// scan along x axis: same structure as the y-pass, over (y,z) columns.
#pragma omp parallel for num_threads( threads )
	for( int thread=0 ; thread<threads ; thread++ )
	{
		int *_oldBuffer = oldBuffer[thread] , *_newBuffer = newBuffer[thread];
		for( int yz=(res*res*thread)/threads ; yz<(res*res*(thread+1))/threads ; yz++ )
		{
			int y = yz/res , z=yz%res;
			// forward scan
			int s=0;
			int* edtPtr = edt[0] + y*res+z;
			for( int x=0 ; x<res ; x++ )
			{
				int dist = _oldBuffer[x] = edtPtr[x*res*res];
				// If the calculated distance to this point is not zero,
				// start from s and see if you can find something closer.
				bool foundCloser = false;
				if( dist )
				{
					for( int t=s ; t<=x ; t++ )
					{
						// Compute the squared distance that would be obtained if we used the (squared) orthogonal distance to t
						// plus the (squared) parallel distance from t to x
						int new_dist = _oldBuffer[t] + (x - t) * (x - t);
						// <=> new_dist = _oldBuffer[t] + x*x - 2*t*x + t*t
						// If s has not been updated then: _oldBuffer[t] + (x - t) * (x - t) > _oldBuffer[x] for all t <= x
						// Taking y = x+d (w/ d>0) we get:
						// _oldBuffer[t] + ( y - t ) * ( y - t ) = _oldBuffer[t] + ( x - t + d ) * ( x - t + d )
						//                                       = _oldBuffer[t] + ( x - t ) * ( x - t ) + 2 * d * ( x - t ) + d * d
						//                                       > _oldBuffer[x] + ( y - x ) * ( y - x ) + 2 * d * ( x - t )
						//                                       > _oldBuffer[x] + ( y - x ) * ( y - x )
						// for all t <= x
						// That is, the squared distance through t <= x has to be at least as large as the squared distance through x
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
				}
				if( !foundCloser ) s=x;
				_newBuffer[x] = dist;
			}
			// backwards scan
			// NOTE(review): this pass uses t>=x where the y-pass used t>y;
			// the t==x candidate adds (x-x)^2=0 and cannot beat _newBuffer[x],
			// so the two forms appear equivalent — confirm if refactoring.
			s = res-1;
			for( int x=res-1 ; x>=0 ; x-- )
			{
				int dist = _newBuffer[x];
				bool foundCloser = false;
				if( dist )
				{
					for( int t=s; t>=x ; t-- )
					{
						int new_dist = _oldBuffer[t] + (x - t) * (x - t);
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
					edtPtr[x*res*res] = dist;
				}
				if( !foundCloser ) s=x;
			}
		}
	}
	// Release per-thread scratch.
	for( int i=0 ; i<threads ; i++ )
	{
		delete[] oldBuffer[i];
		delete[] newBuffer[i];
	}
}
#endif // EDT_INCLUDED
GB_unop__sin_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This instance applies the complex sine (csin) elementwise to a GxB_FC64_t
// (double-complex) array, with no typecast between A and C.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__sin_fc64_fc64)
// op(A') function:  GB (_unop_tran__sin_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = csin (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csin (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = csin (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIN || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies csin to each of the anz entries of Ax, writing into Cx, using
// nthreads OpenMP threads.  Ab (if non-NULL) is the bitmap selecting which
// positions hold live entries.
GrB_Info GB (_unop_apply__sin_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csin (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csin (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose-and-apply variant; the actual work is in the shared template
// GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__sin_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
opencl_pem_fmt_plug.c
/*
 * JtR OpenCL format to crack PEM files.
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <dhiru at openwall.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * The OpenCL boilerplate code is borrowed from other OpenCL formats.
 *
 * Overall flow: PBKDF2-HMAC-SHA1 key derivation runs on the GPU (split into
 * init/loop/final kernels so long iteration counts do not trigger watchdog
 * timeouts), then the derived keys are tested on the CPU with pem_decrypt()
 * (3DES, per the ALGORITHM_NAME suffix below).
 */

#ifdef HAVE_OPENCL

#include "arch.h"
#if !AC_BUILT
#define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */
#endif
#if HAVE_LIBZ

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_pem;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_pem);
#else

#include <string.h>
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "pem_common.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"
#include "misc.h"
#define OUTLEN (32)
#include "opencl_pbkdf2_hmac_sha1.h"

#define FORMAT_LABEL "pem-opencl"
#define OCL_ALGORITHM_NAME "PBKDF2-SHA1 OpenCL"
#define CPU_ALGORITHM_NAME " 3DES"
#define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_WORD
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*cur_salt)
#define SALT_ALIGN MEM_ALIGN_WORD

/* This handles all widths */
/* Byte position of character i of candidate `index` inside the interleaved,
 * big-endian, vector-width-aware key buffer uploaded to the GPU. */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)

static int *cracked;            /* per-candidate crack flags, host side */
static int any_cracked;         /* nonzero iff any candidate cracked this batch */
static struct custom_salt *cur_salt;

static size_t key_buf_size;
static unsigned int *inbuffer;  /* host-side staging buffer for candidate keys */
static pbkdf2_out *output;      /* host-side buffer for derived keys */
static pbkdf2_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt, mem_state;
/* NOTE(review): duplicate tentative definition of key_buf_size (also declared
 * above) — legal in C (both refer to one object) but one should be removed. */
static size_t key_buf_size;
static int new_keys;            /* set by pem_set_key; cleared after GPU upload */
static struct fmt_main *self;

static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;

#define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width)

/*
 * HASH_LOOPS is ideally made by factors of (iteration count - 1) and should
 * be chosen for a kernel duration of not more than 200 ms
 */
#define HASH_LOOPS (3 * 271) // XXX
#define ITERATIONS 100000 /* Just for auto tune */
#define LOOP_COUNT (((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS)
#define STEP 0
#define SEED 128

/* Labels for the five profiled events in crypt_all() (see split_events). */
static const char * warn[] = {
	"P xfer: " , ", init: " , ", loop: " , ", final: ", ", res xfer: "
};

static int split_events[] = { 2, -1, -1 };

//This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/* Largest work-group size usable by all three kernels. */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

/* Allocate host buffers and device cl_mem objects for `gws` work items
 * (scaled by the vector width) and bind the static kernel arguments. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = 64 * gws;

	// Allocate memory
	inbuffer = mem_calloc(1, key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(1, cracked_size);

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}

/* Free everything create_clobj() allocated (guarded by `cracked` so it is
 * safe to call when nothing was allocated yet). */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(output);
		MEM_FREE(cracked);
	}
}

/* Format teardown: release buffers, kernels and the program (once). */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* Format init: pick the SIMD vector width for this device and, when
 * vectorized, advertise it in the algorithm name string. */
static void init(struct fmt_main *_self)
{
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;
	opencl_prepare_dev(gpu_id);

	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo),
		         OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, ocl_v_width);
		self->params.algorithm_name = valgo;
	}
}

/* Build the kernels with the per-format constants baked in, then run the
 * shared auto-tuner to pick global/local work sizes. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events, warn,
		                       2, self, create_clobj, release_clobj,
		                       ocl_v_width * sizeof(pbkdf2_state), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0,
		             (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL));
	}
}

/* Copy the salt into the host-side pbkdf2_salt struct and upload it.
 * outlen is 24: three 3DES keys' worth of derived material. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, SALTLEN);
	currentsalt.length = SALTLEN;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = 24;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt,
		CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}

static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

/* Scatter one candidate password into the interleaved GPU key buffer. */
static void pem_set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

/* Gather a candidate back out of the interleaved buffer (for UI/debug). */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH &&
	       (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/* Derive keys for `count` candidates on the GPU, then test each derived key
 * on the CPU with pem_decrypt().  Returns count (JtR convention). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i, j, index;
	size_t scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	// Copy data to gpu (only when keys changed since the last batch)
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in,
			CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL,
			multi_profilingEvent[0]), "Copy data to gpu");

		new_keys = 0;
	}

	// Run kernels
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init,
		1, NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run initial kernel");

	// Outer loop: one PBKDF2 output block (20 bytes of SHA-1) per pass;
	// inner loop: iterations split into HASH_LOOPS-sized kernel launches,
	// with clFinish + event processing between launches.
	for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
		for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
			BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
				pbkdf2_loop, 1, NULL,
				&global_work_size, lws, 0, NULL,
				multi_profilingEvent[2]), "Run loop kernel");
			BENCH_CLERROR(clFinish(queue[gpu_id]),
			              "Error running loop kernel");
			opencl_process_event();
		}

		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final,
			1, NULL, &global_work_size, lws, 0, NULL,
			multi_profilingEvent[3]), "Run intermediate kernel");
	}

	// Read the result back (blocking read)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out,
		CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL,
		multi_profilingEvent[4]), "Copy result back");

	if (!ocl_autotune_running) {
		// CPU-side verification of each derived key
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++) {
			if (pem_decrypt((unsigned char*)output[index].dk, cur_salt->iv, cur_salt->ciphertext, cur_salt) == 0) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* crypt_all already fully verified via pem_decrypt(), so nothing to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_pem = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		pem_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		pem_valid,
		fmt_default_split,
		fmt_default_binary,
		pem_get_salt,
		{ NULL },
		fmt_default_source,
		{ fmt_default_binary_hash },
		fmt_default_salt_hash,
		NULL,
		set_salt,
		pem_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{ fmt_default_get_hash },
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_LIBZ */
#endif /* HAVE_OPENCL */
solution3.c
# include <math.h>

# define np 400                       /* number of lattice sites */
# define nnmax 7
# define ndr (2 * np)                 /* number of triangles (2 per site) */
# define pi2 (2.0 * 3.141592653589793)

/* Per-triangle vertex spins, gathered by topol(); ldr[t] holds the three
 * site indices of triangle t.  Defined elsewhere. */
extern double s1[np][3], s2[np][3], s3[np][3];
extern int ldr[ndr][3];

/*
 * Sum, over all triangles listed in ldr, the signed (half) solid angle
 * subtended by the three unit vectors s1,s2,s3 at each triangle, and return
 * the total divided by 2*pi in *q.
 *
 * NOTE(review): atan2(ss,cc) with cc = 1 + s1.s2 + s2.s3 + s3.s1 and
 * ss = s1.(s2 x s3) matches the Van Oosterom–Strackee half-solid-angle
 * formula, so *q is presumably the geometric topological charge of the spin
 * field — confirm against the surrounding code.
 *
 * s : unit spin vectors, one per site (assumed normalized — TODO confirm)
 * q : out-parameter receiving the accumulated charge
 */
void topol(double s[][3], double *q)
{
	double siga;
	double cc, cc1, cc2, cc3;
	double ss, ss1, ss2, ss3;
	int idr, n, is;

	siga = 0.0;
	/* Two passes: triangles [0, ndr/2) and [ndr/2, ndr), selected by idr. */
	for (idr = 0; idr <= ndr/2; idr += ndr/2) {
		/* Gather the three vertex spins of each triangle into s1/s2/s3.
		 * NOTE(review): n starts at 1, so triangle idr+0 of each half is
		 * skipped — looks like a Fortran 1-based carry-over; verify. */
#pragma omp parallel for
		for (n = 1; n < ndr/2; n++) {
			s1[n][0] = s[ldr[idr+n][0]][0];
			s1[n][1] = s[ldr[idr+n][0]][1];
			s1[n][2] = s[ldr[idr+n][0]][2];
		}
#pragma omp parallel for
		for (n = 1; n < ndr/2; n++) {
			s2[n][0] = s[ldr[idr+n][1]][0];
			s2[n][1] = s[ldr[idr+n][1]][1];
			s2[n][2] = s[ldr[idr+n][1]][2];
		}
#pragma omp parallel for
		for (n = 1; n < ndr/2; n++) {
			s3[n][0] = s[ldr[idr+n][2]][0];
			s3[n][1] = s[ldr[idr+n][2]][1];
			s3[n][2] = s[ldr[idr+n][2]][2];
		}

		/***** cc = 1 + s1*s2 + s2*s3 + s3*s1 *****/
		/***** ss = s1 * ( s2 x s3 )          *****/
#pragma omp parallel for default(none) \
	shared(s1, s2, s3) \
	private(cc1, cc2, cc3, cc, ss1, ss2, ss3, ss) \
	reduction(+:siga)
		for (is = 1; is < ndr/2; is++) {
			/* pairwise dot products of the three vertex spins */
			cc1 = s1[is][0] * s2[is][0] + s1[is][1] * s2[is][1] + s1[is][2] * s2[is][2];
			cc2 = s2[is][0] * s3[is][0] + s2[is][1] * s3[is][1] + s2[is][2] * s3[is][2];
			cc3 = s3[is][0] * s1[is][0] + s3[is][1] * s1[is][1] + s3[is][2] * s1[is][2];
			cc = 1.0 + cc1 + cc2 + cc3;
			/* scalar triple product s1 . (s2 x s3) */
			ss1 = s2[is][1] * s3[is][2] - s2[is][2] * s3[is][1];
			ss2 = s2[is][2] * s3[is][0] - s2[is][0] * s3[is][2];
			ss3 = s2[is][0] * s3[is][1] - s2[is][1] * s3[is][0];
			ss = s1[is][0] * ss1 + s1[is][1] * ss2 + s1[is][2] * ss3;
			siga += atan2(ss,cc);
		}
	}
	*q = siga / pi2;
}
StateRewardData.h
//
//  CubismUP_3D
//
//  Written by Guido Novati ( novatig@ethz.ch ).
//  Copyright (c) 2017 ETHZ. All rights reserved.
//

#ifndef CubismUP_3D_StateRewardData_h
#define CubismUP_3D_StateRewardData_h

//#include <cassert>
//#include <assert.h>

// utmost import to be defined before including cubism

// Number of lateral-line sensor points per side of the swimmer's skin.
static const int NpLatLine = 10;

//#define __ExploreHalfWake

#ifdef __RL_MPI_CLIENT //hardcoded BC for DCyl
#define checkTerm(...) checkTerm_DcylFollower(__VA_ARGS__)
#define sendInitC(...) sendInitC_DcylFollower(__VA_ARGS__)
#define setRefFrm() setRefFrm_DCylFollower()
//TODO:
// - 2/N fish want open bc in z
// - cleaning: maybe compile cubism and set flags based on user's app choice
#endif

#include "../Definitions.h"
#include "../ObstacleBlock.h"

CubismUP_3D_NAMESPACE_BEGIN

// Container for the RL state/reward bookkeeping of one obstacle (fish):
// time-averaged dynamics, instantaneous pose, skin-force sensors, and the
// save/restart/serialization helpers used by the learning client.
struct StateReward
{
  // Characteristic length/time; the derived scales are used to
  // non-dimensionalize the state vector in fillState().
  double lengthscale, timescale;
  double velscale = lengthscale/timescale;
  double forcescale = velscale*velscale*lengthscale*lengthscale; //l^4/t^2
  double powerscale = forcescale*velscale; //rho*l^3 * l/t^2 * l/t

  bool bRestart = false;            // set when a termination condition is hit
  bool bForgiving=0, bLoadedActions=0, bInteractive=0, randomStart=0;
  //bool randomActions, bSpiral;
  int info=1, stepId=0;//, nActions=2;
  double t_next_comm=0, Tstartlearn=1e9, GoalDX=0, new_curv=0, old_curv=0, new_Tp=0;
  //exponential averages
  double thExp = 0, vxExp = 0, vyExp = 0, avExp = 0;
  //average quantities (time-weighted running means, weight = avg_wght)
  double avg_wght = 0;
  double ThetaAvg = 0, ThetaVel = 0, VxAvg = 0, VyAvg = 0, AvAvg = 0;
  double PoutBnd = 0, Pout = 0, defPowerBnd = 0, defPower = 0, ToD = 0;
  double EffPDefBnd = 0, EffPDef = 0, Pthrust = 0, Pdrag = 0;

  // Zero all running averages and their accumulated weight.
  void resetAverage()
  {
    avg_wght = ThetaAvg = ThetaVel = VxAvg = VyAvg = AvAvg = Pthrust = ToD = 0;
    PoutBnd = Pout = defPowerBnd = defPower = Pdrag = EffPDefBnd = EffPDef = 0;
  }

  // Fold one time step (_dt) of instantaneous quantities into the
  // time-weighted running averages; also updates the exponential averages
  // and the battery integral.
  void updateAverages(const double _dt, const double _th, const double _vx,
    const double _vy, const double _av, const double _pO1, const double _pO2,
    const double _pW1, const double _pW2, const double _eff1,
    const double _eff2, const double _pT, const double _pD,
    const double _T, const double _D)
  {
    if(_dt<=0) return;
    // _ToD: thrust/drag ratio (guard against tiny drag); _W: new total weight
    const double _ToD=_D<1e-9?0:_T/_D, _W=1/(avg_wght+_dt), _vt=atan2(_vy,_vx);
    VxAvg       = ( VxAvg       * avg_wght + _vx   * _dt ) * _W;
    VyAvg       = ( VyAvg       * avg_wght + _vy   * _dt ) * _W;
    AvAvg       = ( AvAvg       * avg_wght + _av   * _dt ) * _W;
    ThetaAvg    = ( ThetaAvg    * avg_wght + _th   * _dt ) * _W;
    ThetaVel    = ( ThetaVel    * avg_wght + _vt   * _dt ) * _W;
    Pout        = ( Pout        * avg_wght + _pO1  * _dt ) * _W;
    PoutBnd     = ( PoutBnd     * avg_wght + _pO2  * _dt ) * _W;
    defPower    = ( defPower    * avg_wght + _pW1  * _dt ) * _W;
    defPowerBnd = ( defPowerBnd * avg_wght + _pW2  * _dt ) * _W;
    EffPDef     = ( EffPDef     * avg_wght + _eff1 * _dt ) * _W;
    EffPDefBnd  = ( EffPDefBnd  * avg_wght + _eff2 * _dt ) * _W;
    Pthrust     = ( Pthrust     * avg_wght + _pT   * _dt ) * _W;
    Pdrag       = ( Pdrag       * avg_wght + _pD   * _dt ) * _W;
    ToD         = ( ToD         * avg_wght + _ToD  * _dt ) * _W;
    avg_wght += _dt;
    // integrate bounded deformation power into the battery
    battery += _dt * defPowerBnd;
    // exponential moving averages with weight _dt (assumes _dt << 1 -- TODO confirm)
    thExp = (1-_dt) * thExp + _dt * _th;
    vxExp = (1-_dt) * vxExp + _dt * _vx;
    vyExp = (1-_dt) * vyExp + _dt * _vy;
    avExp = (1-_dt) * avExp + _dt * _av;
  }

  // instantaneous quantities
  double Xrel = 0, Xabs = 0, Xpov = 0, Yrel = 0, Yabs = 0, Ypov = 0, Theta = 0;
  double VxInst = 0, VyInst = 0, AvInst = 0, VX = 0, VY = 0, AV = 0;
  double phaseShift = 0, Dist = 0, Quad = 0, RelAng = 0;
  double battery = 1, ext_X = -1, ext_Y = -1, ext_Z = -1;

  // Record the instantaneous pose/velocity and flag restart when the fish
  // gets too close to the (hardcoded, non-dimensional) domain boundaries.
  void updateInstant(const double _xR, const double _xA, const double _yR,
    const double _yA, const double _th, const double _vx, const double _vy,
    const double _av)
  {
    Xrel = _xR; Xabs = _xA; Yrel = _yR; Yabs = _yA; Theta= _th;
    VxInst=_vx; VyInst=_vy; AvInst=_av;
    if (Xrel<0.05 || Yrel<0.025) bRestart = true;
    if (ext_X>0 && ext_X-Xrel<0.2) bRestart = true;
    if (ext_Y>0 && ext_Y-Yrel<.025) bRestart = true;
  }

  // sensors: pressure/viscous skin forces and positions on upper/lower side
  vector<double> FPAbove, FVAbove, FPBelow, FVBelow;
  vector<double> PXAbove, PYAbove, PXBelow, PYBelow;
  vector<double> raySight;
  vector<vector<double>> loadedActions;

  StateReward(const double _lengthscale = 1, const double _timescale = 1) :
  lengthscale(_lengthscale), timescale(_timescale)
  {
    //printf("scales: %f %f %f %f %f",
    //  lengthscale,timescale,velscale,forcescale,powerscale);
    // size the sensor arrays (one entry per lateral-line point)
    FPAbove.resize(NpLatLine,0); FVAbove.resize(NpLatLine,0);
    FPBelow.resize(NpLatLine,0); FVBelow.resize(NpLatLine,0);
    PXAbove.resize(NpLatLine,0); PYAbove.resize(NpLatLine,0);
    PXBelow.resize(NpLatLine,0); PYBelow.resize(NpLatLine,0);
    raySight.resize(2*NpLatLine,0);
  }

  // Copies only the scales (and re-derives the dependent ones); the sensor
  // arrays are resized, not copied.
  StateReward& operator= (const StateReward& s)
  {
    lengthscale = s.lengthscale;
    timescale = s.timescale;
    velscale = lengthscale/timescale;
    forcescale = velscale*velscale*lengthscale*lengthscale; //l^4/t^2
    powerscale = forcescale*velscale; //rho*l^3 * l/t^2 * l/t
    #ifdef __RL_TRAINING
      printf("scales: %f %f %f %f %f",
        lengthscale,timescale,velscale,forcescale,powerscale);
    #endif
    FPAbove.resize(NpLatLine,0); FVAbove.resize(NpLatLine,0);
    FPBelow.resize(NpLatLine,0); FVBelow.resize(NpLatLine,0);
    PXAbove.resize(NpLatLine,0); PYAbove.resize(NpLatLine,0);
    PXBelow.resize(NpLatLine,0); PYBelow.resize(NpLatLine,0);
    raySight.resize(2*NpLatLine,0);
    return *this;
  }

  // Read the RL-related command-line options.
  void parseArguments(ArgumentParser & parser)
  {
    bInteractive = parser("-interactive").asBool(false);
    Tstartlearn = parser("-Tstartlearn").asDouble(bInteractive ? timescale : 1e9);
    GoalDX = parser("-GoalDX").asDouble(0);
    //nActions = parser("-nActions").asInt(2);
    bForgiving = parser("-easyFailBox").asBool(false);
    randomStart = parser("-randomStart").asBool(false);
    bLoadedActions = parser("-useLoadedActions").asBool(false);
    //hardcoded to compute avg state components for halfT b4 first comm... iffy
    t_next_comm = Tstartlearn;// - timescale/2;
    if (bLoadedActions) readLoadedActions();
    printf("scales: %f %f %f %f %f, %d, %f, %f, %d, %d, %d\n",
      lengthscale,timescale,velscale,forcescale,powerscale,
      bInteractive, Tstartlearn, GoalDX, bForgiving, randomStart,
      bLoadedActions);
  }

  // Pop the next pre-recorded action (they are stored in reverse order so
  // pop_back yields chronological order); empty vector when exhausted.
  vector<double> useLoadedActions()
  {
    if (loadedActions.size()>1) {
      vector<double> actions = loadedActions.back();
      loadedActions.pop_back();
      return actions;
    }
    //else zero actions
    else return vector<double>();
  }

  // Load the replay actions from "orders_1.txt": each line is
  // "time action0 [action1]".
  void readLoadedActions(const int nActions = 2)
  {
    double dummy_time;
    vector<double> action(nActions);
    ifstream in("orders_1.txt");
    std::string line;
    if(in.good()) {
      while (getline(in, line)) {
        std::istringstream line_in(line);
        if(nActions==2)
          line_in >> dummy_time >> action[0] >> action[1];
        else line_in >> dummy_time >> action[0];
        //i want to do pop back later:
        loadedActions.insert(loadedActions.begin(),action);
      }
      in.close();
    }
  }

  void updateStepId(const int _stepId) {stepId=_stepId;}

  // Express pose and velocities in the frame-of-reference (FOR) of another
  // body (e.g. the leader): rotates velocities into the fish frame and the
  // position into the FOR frame, and computes distance/bearing.
  void finalize(const double xFOR, const double yFOR, const double thFOR,
    const double vxFOR, const double vyFOR, const double avFOR)
  {
    //velocity of reference from fish pov
    VX = (VxInst-vxFOR)*std::cos(Theta) + (VyInst-vyFOR)*std::sin(Theta);
    VY = (VyInst-vyFOR)*std::cos(Theta) - (VxInst-vxFOR)*std::sin(Theta);
    AV = (AvInst-avFOR);
    //velocity of fish in reference pov
    const double vxAvg = VxAvg, vyAvg = VyAvg;
    VxAvg = vxAvg*std::cos(Theta) + vyAvg*std::sin(Theta);
    VyAvg = vyAvg*std::cos(Theta) - vxAvg*std::sin(Theta);
    AvAvg = AvAvg;
    //position in reference frame
    Xpov = (Xrel-xFOR)*std::cos(thFOR) + (Yrel-yFOR)*std::sin(thFOR);
    Ypov = (Yrel-yFOR)*std::cos(thFOR) - (Xrel-xFOR)*std::sin(thFOR);
    RelAng = Theta - thFOR;
    const double Xframe=(xFOR-Xrel)*std::cos(Theta)+(yFOR-Yrel)*std::sin(Theta);
    const double Yframe=(yFOR-Yrel)*std::cos(Theta)-(xFOR-Xrel)*std::sin(Theta);
    Dist = std::sqrt(std::pow(Xrel-xFOR,2) + std::pow(Yrel-yFOR,2));
    Quad = std::atan2(Yframe, Xframe);
  }

  // Termination test for the leader-follower task: returns (and sets)
  // bRestart when the follower leaves the allowed box behind the leader.
  bool checkTerm_LeadFollower(const double
    xFOR, const double yFOR,
    const double thFOR,const double vxFOR,const double vyFOR,const double avFOR)
  {
    checkTerm_bounds(xFOR, yFOR);
    if(not bInteractive or bRestart) return bRestart;
    // follower position in the leader's frame
    const double _Xrel = (Xrel-xFOR)*cos(thFOR) + (Yrel-yFOR)*sin(thFOR);
    const double _Yrel = (Yrel-yFOR)*cos(thFOR) - (Xrel-xFOR)*sin(thFOR);
    const double _thRel= Theta - thFOR;
    const double _Dist = sqrt(pow(Xrel-xFOR,2) + pow(Yrel-yFOR,2));
    bRestart = _Dist < .25*lengthscale;
    if(bRestart) {printf("Too close\n"); return bRestart;}
    //at DX=1, allowed DY=.5, at DX=2.5 allowed DY=.75
    bRestart = fabs(_Yrel)>(bForgiving?lengthscale: _Xrel/6 + 7*lengthscale/12);
    if(bRestart) {printf("Too much vertical distance\n"); return bRestart;}
    #ifdef __ExploreHalfWake
      bRestart = _Yrel < -.1*lengthscale;
      if(bRestart) {printf("Wrong half of the wake\n"); return bRestart;}
    #endif
    bRestart = std::fabs(_thRel)> (bForgiving ? M_PI : M_PI/2);
    if(bRestart) {printf("Too different inclination\n"); return bRestart;}
    bRestart = _Xrel < lengthscale || _Xrel > 2.5*lengthscale;
    if(bRestart) {printf("Too far from horizontal goal\n"); return bRestart;}
    return bRestart;
  }

  // Termination test for the D-cylinder follower task: also checks that no
  // skin sensor point touches the cylinder (x < xFOR) or leaves the domain.
  bool checkTerm_DcylFollower(const double xFOR,const double yFOR,
    const double thFOR,const double vxFOR,const double vyFOR,const double avFOR)
  {
    if (bRestart) printf("Already ended\n");
    if(not bInteractive or bRestart) return bRestart;
    for(int i=0; i<NpLatLine; i++) {
      if(PXAbove[i]< xFOR||PXBelow[i]< xFOR) {printf("Touching\n"); bRestart=1;}
      if(PXAbove[i]> 0.8||PXBelow[i]> 0.8) {printf("Boundary\n"); bRestart=1;}
      if(PYAbove[i]< 0||PYBelow[i]< 0) {printf("Boundary\n"); bRestart=1;}
      if(PYAbove[i]>ext_Y||PYBelow[i]>ext_Y) {printf("Boundary\n"); bRestart=1;}
      if(bRestart) return bRestart;
    }
    const double _Xrel = (Xrel-xFOR)*cos(thFOR) + (Yrel-yFOR)*sin(thFOR);
    const double _Yrel = (Yrel-yFOR)*cos(thFOR) - (Xrel-xFOR)*sin(thFOR);
    const double _Dist = sqrt(pow(Xrel-xFOR,2) + pow(Yrel-yFOR,2));
    (void)_Xrel; // To stop complaining about unused variables.
    (void)_Yrel;
    (void)_Dist;
    bRestart = std::fabs(_Yrel) > 2*lengthscale;
    if(bRestart) {printf("Too much vertical distance\n"); return bRestart;}
    bRestart = std::fabs(Theta)>M_PI;
    if(bRestart) {printf("Too different inclination\n"); return bRestart;}
    return bRestart;
  }

  // Domain-boundary termination test (same thresholds as updateInstant, but
  // scaled by lengthscale).
  bool checkTerm_bounds(const double xFOR, const double yFOR)
  {
    if ( Xrel<.05*lengthscale || Yrel<.025*lengthscale) bRestart = true;
    if ( ext_X>0 && ext_X-Xrel < .2  *lengthscale ) bRestart = true;
    if ( ext_Y>0 && ext_Y-Yrel < .025*lengthscale ) bRestart = true;
    if (bRestart) printf("Out of bounds\n");
    return bRestart;
  }

  // Accumulator for per-skin-point pressure/viscous forces, laid out as 5
  // contiguous slabs of nDest doubles: fxP, fyP, fxV, fyV, hit count.
  struct skinForcesVels
  {
    skinForcesVels(const int _nDest) : nDest(_nDest), data(_alloc(5*_nDest))
    {
      memset(data, 0, sizeof(double)*5*nDest);
    }
    virtual ~skinForcesVels() { _dealloc(data); }
    // Add one grid point's force contribution to destination slot i.
    inline void storeNearest(const double fxP, const double fyP,
      const double fxV, const double fyV, const int i)
    {
      data[i+0*nDest] += fxP;
      data[i+1*nDest] += fyP;
      data[i+2*nDest] += fxV;
      data[i+3*nDest] += fyV;
      data[i+4*nDest] += 1.;
    }
    inline double fxP(const int i) { return data[i+0*nDest]; }
    inline double fyP(const int i) { return data[i+1*nDest]; }
    inline double fxV(const int i) { return data[i+2*nDest]; }
    inline double fyV(const int i) { return data[i+3*nDest]; }
    // Sum contributions across all MPI ranks in place.
    void synchronize(const MPI_Comm comm)
    {
      //int rank;
      //MPI_Comm_rank(comm, &rank);
      #ifndef CUP_SINGLE_PRECISION
        MPI_Allreduce(MPI_IN_PLACE, data, 5*nDest, MPI_DOUBLE, MPI_SUM, comm);
      #else //CUP_SINGLE_PRECISION
        MPI_Allreduce(MPI_IN_PLACE, data, 5*nDest, MPI_FLOAT, MPI_SUM, comm);
      #endif//
    }
    // Dump the accumulated forces to a per-step text file (rank 0 only).
    void print(const MPI_Comm comm, const int stepNumber)
    {
      int rank;
      MPI_Comm_rank(comm, &rank);
      if(rank) return;
      ofstream fout;
      char buf[500];
      sprintf(buf, "midplaneData_%07d.txt", stepNumber);
      string filename(buf);
      fout.open(filename, ios::trunc);
      for(int i=0; i<nDest; ++i)
        fout<<fxP(i)<<"\t"<<fyP(i)<<"\t"<<fxV(i)<<"\t"<<fyV(i)<<"\t"<<std::endl;
      fout.close();
    }
   private:
    const int nDest;
    double*const data;
    double * _alloc(const int N) { return new double[N]; }
    void _dealloc(double * ptr) {
      if(ptr not_eq nullptr) {
        delete [] ptr;
        ptr=nullptr;
      }
    }
  };

  typedef const Real*const constAry;

  // Gathers, for every skin point of the upper (xU,yU) and lower (xL,yL)
  // midplane contours, the pressure/viscous forces of the grid points lying
  // within one cell width h (in x, y, and of the midplane zObst), reduces
  // them across MPI ranks, and bins them into the NpLatLine sensor arrays
  // (normal projection of pressure force, tangential projection of viscous
  // force).
  void nearestGridPoints(
    const std::map<int,ObstacleBlock*>& obstacleBlocks,
    const vector<BlockInfo>& vInfo, const int Nskin,
    constAry xU, constAry yU, constAry xL, constAry yL,
    constAry nxU, constAry nyU, constAry nxL, constAry nyL,
    const double zObst, const double h, const MPI_Comm comm)
  {
    constexpr int BS = FluidBlock::BS;
    skinForcesVels data(Nskin*2); // first Nskin slots: upper side, rest: lower
    const double eps = 10*std::numeric_limits<double>::epsilon();
    const unsigned NB = vInfo.size();
    #pragma omp parallel for schedule(dynamic)
    for (int j=0; j<2*Nskin; j++) {
      const double X = j>=Nskin ? xL[j-Nskin] : xU[j];
      const double Y = j>=Nskin ? yL[j-Nskin] : yU[j];
      for(unsigned i=0; i<NB; i++) {
        const BlockInfo I = vInfo[i];
        const auto pos = obstacleBlocks.find(I.blockID);
        if(pos == obstacleBlocks.end()) continue;
        if(pos->second->nPoints == 0) continue;
        const auto& o = pos->second;
        assert(o->filled);
        // cheap block-level bounding-box rejection before the point loop
        double max_pos[3], min_pos[3];
        I.pos(min_pos, 0, 0, 0);
        I.pos(max_pos, BS-1, BS-1, BS-1);
        if(zObst-max_pos[2]>h+eps || min_pos[2]-zObst>h+eps) continue;
        if(Y-max_pos[1]>h+eps || min_pos[1]-Y>h+eps) continue;
        if(X-max_pos[0]>h+eps || min_pos[0]-X>h+eps) continue;
        for(int k=0; k<pos->second->nPoints; k++) {
          if(std::fabs(o->pZ[k]-zObst)>h+eps) continue;
          if(std::fabs(o->pY[k]-Y)>h+eps) continue;
          if(std::fabs(o->pX[k]-X)>h+eps) continue;
          //printf("%f %f %f %f\n",o->fxP[k],o->fyP[k],o->fxV[k],o->fyV[k]);
          data.storeNearest(o->fxP[k], o->fyP[k], o->fxV[k], o->fyV[k], j);
        }
      }
    }
    data.synchronize(comm);
    //data.print(comm,stepId);
    /*
    int rank;
    MPI_Comm_rank(comm, &rank);
    if(!rank) {
      ofstream fileskin;
      char buf[500];
      sprintf(buf, "skinPoints_%07d.txt", stepId);
      string filename(buf);
      fileskin.open(filename, ios::trunc);
      for (int j=0; j<Nskin; j++)
        fileskin<<xU[j]<<"\t"<<yU[j]<<std::endl;
      for (int j=Nskin-1; j>=0; j--)
        fileskin<<xL[j]<<"\t"<<yL[j]<<std::endl;
      fileskin.close();
    }
    */
    vector<double> NxAbove(NpLatLine,0), NyAbove(NpLatLine,0);
    vector<double> NxBelow(NpLatLine,0), NyBelow(NpLatLine,0);
    //now, feed the sensors: bin the 2*Nskin skin points into NpLatLine bins
    for (int k=0; k<NpLatLine; k++) {
      const int first = k*(double)Nskin/(double)NpLatLine;
      const int last = (k+1)*(double)Nskin/(double)NpLatLine;
      double FPxAbove=0, FPxBelow=0, FPyAbove=0, FPyBelow=0;
      double FVxAbove=0, FVxBelow=0, FVyAbove=0, FVyBelow=0;
      for (int j=first; j<last; j++) {
        FPxAbove += data.fxP(j);
        FPxBelow += data.fxP(j+Nskin);
        FPyAbove += data.fyP(j);
        FPyBelow += data.fyP(j+Nskin);
        FVxAbove += data.fxV(j);
        FVxBelow += data.fxV(j+Nskin);
        FVyAbove += data.fyV(j);
        FVyBelow += data.fyV(j+Nskin);
      }
      // sensor position = skin point at the middle of the bin
      const int mid = 0.5*(first+last);
      PXAbove[k] = xU[mid]; PYAbove[k] = yU[mid];
      PXBelow[k] = xL[mid]; PYBelow[k] = yL[mid];
      // unit normal n and tangent t (rotated -90 deg) of the upper skin
      const double nxAbove = nxU[mid];
      const double nyAbove = nyU[mid];
      const double txAbove = nyU[mid];
      const double tyAbove =-nxU[mid];
      NxAbove[k] = nxAbove; NyAbove[k] = nyAbove;
      // unit normal n and tangent t (rotated +90 deg) of the lower skin
      const double nxBelow = nxL[mid];
      const double nyBelow = nyL[mid];
      const double txBelow =-nyL[mid];
      const double tyBelow = nxL[mid];
      NxBelow[k] = nxBelow; NyBelow[k] = nyBelow;
      // pressure force projected on the normal, viscous on the tangent
      FPAbove[k] = FPxAbove*nxAbove + FPyAbove*nyAbove;
      FVAbove[k] = FVxAbove*txAbove + FVyAbove*tyAbove;
      FPBelow[k] = FPxBelow*nxBelow + FPyBelow*nyBelow;
      FVBelow[k] = FVxBelow*txBelow + FVyBelow*tyBelow;
    }
    if(0){ // disabled debug dump of the sensor distribution
      ofstream fileskin;
      char buf[500];
      sprintf(buf, "sensorDistrib_%07d.txt", stepId);
      string filename(buf);
      fileskin.open(filename, ios::trunc);
      //  int k=0;
      for(int i=0; i<NpLatLine; ++i)
        fileskin<<PXAbove[i]<<"\t"<<PYAbove[i]<<"\t"<<NxAbove[i]
          <<"\t"<<NyAbove[i]<<"\t"<<FPAbove[i]<<"\t"<<FVAbove[i]
          //<<"\t"<<raySight[k++]
          <<std::endl;
      for(int i=0; i<NpLatLine; ++i)
        fileskin<<PXBelow[i]<<"\t"<<PYBelow[i]<<"\t"<<NxBelow[i]
          <<"\t"<<NyBelow[i]<<"\t"<<FPBelow[i]<<"\t"<<FVBelow[i]
          //<<"\t"<<raySight[k++]
          <<std::endl;
      fileskin.close();
    }
  }

  // Write the full RL state (scalars + sensor arrays) to
  // <filename>_save_data.txt for restarts.
  void save(const int step_id, string filename)
  {
    ofstream savestream;
    savestream.setf(std::ios::scientific);
    savestream.precision(std::numeric_limits<double>::digits10 + 1);
    string fullFileName = filename==string() ? "restart_IF2D_Stefan" : filename;
    savestream.open(fullFileName+"_save_data.txt");
    savestream << bRestart << "\t" << info << "\t" << avg_wght << "\t"
      << t_next_comm << "\t" << Xrel << "\t" << Xabs << "\t" << Yrel << "\t"
      << Yabs << "\t" << Theta << "\t" << VxAvg << "\t" << VyAvg<< "\t"
      << AvAvg << "\t" << thExp << "\t" << vxExp << "\t" << vyExp<< "\t"
      << avExp << "\t" << VxInst << "\t" << VyInst<< "\t" << AvInst << "\t"
      << Dist << "\t" << Quad << "\t" << RelAng<< "\t" << VX << "\t" << VY
      << "\t" << AV << "\t" << ThetaAvg<<"\t"<<ThetaVel<<"\t"<<PoutBnd<<"\t"<<Pout
      << "\t" << defPowerBnd<<"\t"<<defPower<<"\t"<<EffPDefBnd<<"\t"<<EffPDef
      << "\t" << Pthrust << "\t" << Pdrag << "\t" << ToD << std::endl;
    for (int i=0; i<NpLatLine; i++) {
      savestream <<
        PXAbove[i] << "\t" << PYAbove[i] << "\t" <<
        PXBelow[i] << "\t" << PYBelow[i] << "\t" <<
        FPAbove[i] << "\t" << FVAbove[i] << "\t" <<
        FPBelow[i] << "\t" << FVBelow[i] << std::endl;
    }
    savestream.close();
  }

  // Read the state back in the exact order written by save(); silently
  // returns if the file is missing. Rank 0 echoes the restored values.
  void restart(string filename)
  {
    ifstream restartstream;
    string fullFileName = filename;
    restartstream.open(fullFileName+"_save_data.txt");
    if(not restartstream.good()) return;
    restartstream >> bRestart >> info >> avg_wght >> t_next_comm >> Xrel >>
      Xabs >> Yrel >> Yabs >> Theta >> VxAvg >> VyAvg >> AvAvg >> thExp >>
      vxExp >> vyExp >> avExp >> VxInst >> VyInst >> AvInst >> Dist >> Quad
      >> RelAng >> VX >> VY >> AV >> ThetaAvg >> ThetaVel >> PoutBnd >> Pout
      >> defPowerBnd >> defPower >> EffPDefBnd>> EffPDef >> Pthrust >> Pdrag
      >> ToD;
    for (int i=0; i<NpLatLine; i++) {
      restartstream >> PXAbove[i] >> PYAbove[i] >> PXBelow[i] >> PYBelow[i]
        >> FPAbove[i] >> FVAbove[i] >> FPBelow[i] >> FVBelow[i];
    }
    restartstream.close();
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    if (rank==0) {
      cout << bRestart << "\t" << info << "\t" << avg_wght << "\t"
        << t_next_comm << "\t" << Xrel << "\t" << Xabs << "\t" << Yrel << "\t"
        << Yabs << "\t" << Theta << "\t" << VxAvg << "\t" << VyAvg<< "\t"
        << AvAvg << "\t" << thExp << "\t" << vxExp << "\t" << vyExp<< "\t"
        << avExp << "\t" << VxInst << "\t" << VyInst<< "\t" << AvInst << "\t"
        << Dist << "\t" << Quad << "\t" << RelAng<< "\t" << VX << "\t" << VY
        << "\t" << AV << "\t" << ThetaAvg << "\t" << ThetaVel << "\t"
        << PoutBnd << "\t" << Pout << "\t" << defPowerBnd << "\t" << defPower
        << "\t" << EffPDefBnd<< "\t" << EffPDef << "\t" << Pthrust << "\t"
        << Pdrag << "\t" << ToD << std::endl;
      for (int i=0; i<NpLatLine; i++) {
        cout <<
          PXAbove[i] << "\t" << PYAbove[i] << "\t" <<
          PXBelow[i] << "\t" << PYBelow[i] << "\t" <<
          FPAbove[i] << "\t" << FVAbove[i] << "\t" <<
          FPBelow[i] << "\t" << FVBelow[i] << std::endl;
      }
    }
  }

  // Diagnostic output: per-step sensor distribution file and an appended
  // line of averaged quantities per obstacle ID.
  void print(const int ID, const int stepNumber, const double time)
  {
    //int rank;
    //MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    //if (rank) return;
    {
      ofstream fileskin;
      char buf[500];
      sprintf(buf, "sensorDistrib_%1d_%07d.txt", ID, stepNumber);
      string filename(buf);
      fileskin.open(filename, ios::trunc);
      int k=0;
      for(int i=0; i<NpLatLine; ++i)
        fileskin<<PXAbove[i]<<"\t"<<PYAbove[i]<<"\t"<<FPAbove[i]<<"\t"<<FVAbove[i]<<"\t"<<raySight[k++]<<std::endl;
      for(int i=0; i<NpLatLine; ++i)
        fileskin<<PXBelow[i]<<"\t"<<PYBelow[i]<<"\t"<<FPBelow[i]<<"\t"<<FVBelow[i]<<"\t"<<raySight[k++]<<std::endl;
      fileskin.close();
    }
    {
      ofstream fileskin;
      char buf[500];
      sprintf(buf, "avgSensors_%1d.txt",ID);
      string filename(buf);
      fileskin.open(filename, ios::app);
      fileskin<< avg_wght << "\t" << t_next_comm << "\t" << Xrel << "\t"
        << Xabs << "\t" << Yrel << "\t" << Yabs << "\t" << Theta << "\t"
        << VxAvg << "\t" << VyAvg<< "\t" << AvAvg << "\t" << thExp << "\t"
        << vxExp << "\t" << vyExp<< "\t" << avExp << "\t" << VxInst << "\t"
        << VyInst<< "\t" << AvInst << "\t" << Dist << "\t" << Quad << "\t"
        << RelAng<< "\t" << VX << "\t" << VY << "\t" << AV << "\t"
        << ThetaAvg << "\t" << ThetaVel << "\t" << PoutBnd << "\t" << Pout
        << "\t" << defPowerBnd << "\t" << defPower << "\t" << EffPDefBnd<< "\t"
        << EffPDef << "\t" << Pthrust << "\t" << Pdrag << "\t" << ToD
        << std::endl;
      fileskin.close();
    }
  }

  // Assemble the non-dimensional RL state vector. Layout: pose, timing,
  // curvature actions, (for 2-action tasks) period/phase/velocities, then
  // averaged dynamics, and optionally the skin-force and ray sensors when
  // nStateVars is large enough to hold them.
  vector<double> fillState(const double time, const int nStateVars,
    const int nActions = 2)
  {
    if(bRestart) {
      if(info==1) printf("Reached termination before first action!!!\n");
      info = 2;
    }
    vector<double> state(nStateVars, 0);
    int k = 0;
    //state[k++] = sr.Xpov*invlscale - GoalDX;
    state[k++] = Xpov / lengthscale;
    state[k++] = Ypov / lengthscale;
    state[k++] = RelAng;
    state[k++] = std::fmod(time, timescale); //1 is Tperiod of leader
    state[k++] = new_curv;
    state[k++] = old_curv;
    if(nActions==2) {
      state[k++] = new_Tp;
      state[k++] = phaseShift;
      state[k++] = VX / velscale;
      state[k++] = VY / velscale;
      state[k++] = AV / velscale;
    }
    state[k++] = Dist / lengthscale;
    state[k++] = Quad;
    state[k++] = VxAvg / velscale;
    state[k++] = VyAvg / velscale;
    state[k++] = AvAvg / velscale;
    state[k++] = Pout / powerscale;
    state[k++] = defPower / powerscale;
    state[k++] = EffPDef;
    state[k++] = PoutBnd / powerscale;
    state[k++] = defPowerBnd / powerscale;
    state[k++] = EffPDefBnd;
    state[k++] = Pthrust / powerscale;
    state[k++] = Pdrag / powerscale;
    state[k++] = ToD;
    if(nStateVars>=k+4*NpLatLine) {
      for (int j=0; j<NpLatLine; j++) state[k++] = FPAbove[j] / forcescale;
      //for (int j=0; j<NpLatLine; j++) printf("FPAbove %d %f\n",j,FPAbove[j]);
      for (int j=0; j<NpLatLine; j++) state[k++] = FVAbove[j] / forcescale;
      //for (int j=0; j<NpLatLine; j++) printf("FVAbove %d %f\n",j,FVAbove[j]);
      for (int j=0; j<NpLatLine; j++) state[k++] = FPBelow[j] / forcescale;
      //for (int j=0; j<NpLatLine; j++) printf("FPBelow %d %f\n",j,FPBelow[j]);
      for (int j=0; j<NpLatLine; j++) state[k++] = FVBelow[j] / forcescale;
      //for (int j=0; j<NpLatLine; j++) printf("FVBelow %d %f\n",j,FVBelow[j]);
    }
    if(nStateVars>=k+2*NpLatLine)
      for (int j=0;j<2*NpLatLine;j++) state[k++] = raySight[j] / lengthscale;
    return state;
  }
};

CubismUP_3D_NAMESPACE_END

#endif // CubismUP_3D_StateRewardData_h
ark_heat1D_adapt_ompdev.c
/*--------------------------------------------------------------- * Programmer(s): Shelby Lockhart @ LLNL *--------------------------------------------------------------- * Based on the serial code ark_heat1D_adapt.c developed * by Daniel R. Reynolds and parallelized with OpenMP 4.5 *--------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *--------------------------------------------------------------- * Example problem: * * The following test simulates a simple 1D heat equation, * u_t = k*u_xx + f * for t in [0, 10], x in [0, 1], with initial conditions * u(0,x) = 0 * Dirichlet boundary conditions, i.e. * u_t(t,0) = u_t(t,1) = 0, * and a heating term of the form * f = 2*exp(-200*(x-0.25)*(x-0.25)) * - exp(-400*(x-0.7)*(x-0.7)) * + exp(-500*(x-0.4)*(x-0.4)) * - 2*exp(-600*(x-0.55)*(x-0.55)); * * The spatial derivatives are computed using a three-point * centered stencil (second order for a uniform mesh). The data * is initially uniformly distributed over N points in the interval * [0, 1], but as the simulation proceeds the mesh is adapted. * * This program solves the problem with a DIRK method, solved with * a Newton iteration and SUNLinSol_PCG linear solver, with a * user-supplied Jacobian-vector product routine. *---------------------------------------------------------------*/ /* Header files */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */ #include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */ #include <sunlinsol/sunlinsol_pcg.h> /* access to PCG SUNLinearSolver */ #include <sundials/sundials_types.h> /* defs. 
 of realtype, sunindextype, etc */
#include <sundials/sundials_math.h>     /* def. of SUNRsqrt, etc. */
#ifdef _OPENMP
#include <omp.h>                        /* OpenMP functions */
#endif

#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif

/* constants */
#define ZERO RCONST(0.0)
#define PT25 RCONST(0.25)
#define PT4  RCONST(0.4)
#define PT5  RCONST(0.5)
#define PT55 RCONST(0.55)
#define PT7  RCONST(0.7)
#define ONE  RCONST(1.0)
#define TWO  RCONST(2.0)
#define TWOHUNDRED  RCONST(200.0)
#define FOURHUNDRED RCONST(400.0)
#define FIVEHUNDRED RCONST(500.0)
#define SIXHUNDRED  RCONST(600.0)

/* user data structure: the mesh lives both on the host (for file output)
   and on the offload device (for the RHS/Jacobian kernels) */
typedef struct {
  sunindextype N;       /* current number of intervals */
  realtype *x_host;     /* current mesh on host */
  realtype *x_dev;      /* current mesh on device */
  realtype k;           /* diffusion coefficient */
  realtype refine_tol;  /* adaptivity tolerance */
} *UserData;

/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
               N_Vector fy, void *user_data, N_Vector tmp);

/* Private function to check function return values */
realtype * adapt_mesh(N_Vector y, sunindextype *Nnew, UserData udata);
static int project(sunindextype Nold, realtype *xold, N_Vector yold,
                   sunindextype Nnew, realtype *xnew, N_Vector ynew);
static int check_flag(void *flagvalue, const char *funcname, int opt);

/* Main Program: sets up the heat problem, integrates one step at a time,
   and re-adapts the mesh (resizing all solver structures) after each step */
int main() {

  /* general problem parameters */
  realtype T0 = RCONST(0.0);        /* initial time */
  realtype Tf = RCONST(1.0);        /* final time */
  realtype rtol = RCONST(1.e-3);    /* relative tolerance */
  realtype atol = RCONST(1.e-10);   /* absolute tolerance */
  realtype hscale = RCONST(1.0);    /* time step change factor on resizes */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 21;              /* initial spatial mesh size */
  realtype refine = RCONST(3.0e-3); /* adaptivity refinement tolerance */
  realtype k = RCONST(0.5);         /* heat conductivity */
  sunindextype i;
  long int nni, nni_tot=0, nli, nli_tot=0;
  int iout=0;

  /* general problem variables */
  int flag;                  /* reusable error-checking flag */
  N_Vector y = NULL;         /* empty vector for storing solution */
  N_Vector y2 = NULL;        /* empty vector for storing solution */
  N_Vector yt = NULL;        /* empty vector for swapping */
  SUNLinearSolver LS = NULL; /* empty linear solver object */
  void *arkode_mem = NULL;   /* empty ARKode memory structure */
  FILE *XFID, *UFID;
  realtype t, olddt, newdt;
  realtype *xnew_host = NULL;
  realtype *xnew_dev = NULL;
  sunindextype Nnew;
  int dev, host;

  /* get host and offloading device */
  dev  = omp_get_default_device();
  host = omp_get_initial_device();

  /* allocate and fill initial udata structure (uniform mesh on [0,1]) */
  udata = (UserData) malloc(sizeof(*udata));
  udata->N = N;
  udata->k = k;
  udata->refine_tol = refine;
  udata->x_host = malloc(N * sizeof(realtype));
  for (i=0; i<N; i++)  udata->x_host[i] = ONE*i/(N-1);
  udata->x_dev = omp_target_alloc(N * sizeof(realtype), dev);
  omp_target_memcpy(udata->x_dev, udata->x_host, N * sizeof(realtype), 0, 0, dev, host);

  /* Initial problem output */
  printf("\n1D adaptive Heat PDE test problem:\n");
  printf("  diffusion coefficient:  k = %"GSYM"\n", udata->k);
  printf("  initial N = %li\n", (long int) udata->N);

  /* Initialize data structures */
  y = N_VNew_OpenMPDEV(N);  /* Create initial OpenMPDEV vector for solution */
  if (check_flag((void *) y, "N_VNew_OpenMPDEV", 0)) return 1;
  N_VConst(ZERO, y);        /* Set initial conditions */

  /* output mesh to disk */
  XFID=fopen("heat_mesh.txt","w");

  /* output initial mesh to disk */
  for (i=0; i<udata->N; i++)  fprintf(XFID," %.16"ESYM, udata->x_host[i]);
  fprintf(XFID,"\n");

  /* Open output stream for results, access data array */
  UFID=fopen("heat1D.txt","w");

  /* output initial condition to disk (device data must be copied back first) */
  N_VCopyFromDevice_OpenMPDEV(y);
  data = N_VGetHostArrayPointer_OpenMPDEV(y);
  for (i=0; i<udata->N; i++)  fprintf(UFID," %.16"ESYM, data[i]);
  fprintf(UFID,"\n");

  /* Initialize the ARK timestepper (implicit RHS only) */
  arkode_mem = ARKStepCreate(NULL, f, T0, y);
  if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1;

  /* Set routines */
  flag = ARKStepSetUserData(arkode_mem, (void *) udata);   /* Pass udata to user functions */
  if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
  flag = ARKStepSetMaxNumSteps(arkode_mem, 10000);         /* Increase max num steps */
  if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1;
  flag = ARKStepSStolerances(arkode_mem, rtol, atol);      /* Specify tolerances */
  if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
  flag = ARKStepSetAdaptivityMethod(arkode_mem, 2, 1, 0, NULL);  /* Set adaptivity method */
  if (check_flag(&flag, "ARKStepSetAdaptivityMethod", 1)) return 1;
  flag = ARKStepSetPredictorMethod(arkode_mem, 0);         /* Set predictor method */
  if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1;

  /* Specify linearly implicit RHS, with time-dependent Jacobian */
  flag = ARKStepSetLinear(arkode_mem, 1);
  if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1;

  /* Initialize PCG solver -- no preconditioning, with up to N iterations  */
  LS = SUNLinSol_PCG(y, 0, (int) N);
  if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;

  /* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */
  flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL);   /* Attach linear solver to ARKStep */
  if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
  flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac);      /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;

  /* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then
     prints results.  Stops when the final time has been reached */
  t  = T0;
  olddt = ZERO;
  newdt = ZERO;
  printf("  iout          dt_old                 dt_new               ||u||_rms       N   NNI  NLI\n");
  printf(" ----------------------------------------------------------------------------------------\n");
  printf(" %4i  %19.15"ESYM"  %19.15"ESYM"  %19.15"ESYM"  %li   %2i  %3i\n",
         iout, olddt, newdt, SUNRsqrt(N_VDotProd(y,y)/udata->N), (long int) udata->N, 0, 0);
  while (t < Tf) {

    /* "set" routines */
    flag = ARKStepSetStopTime(arkode_mem, Tf);
    if (check_flag(&flag, "ARKStepSetStopTime", 1)) return 1;
    flag = ARKStepSetInitStep(arkode_mem, newdt);
    if (check_flag(&flag, "ARKStepSetInitStep", 1)) return 1;

    /* call integrator */
    flag = ARKStepEvolve(arkode_mem, Tf, y, &t, ARK_ONE_STEP);
    if (check_flag(&flag, "ARKStepEvolve", 1)) return 1;

    /* "get" routines */
    flag = ARKStepGetLastStep(arkode_mem, &olddt);
    if (check_flag(&flag, "ARKStepGetLastStep", 1)) return 1;
    flag = ARKStepGetCurrentStep(arkode_mem, &newdt);
    if (check_flag(&flag, "ARKStepGetCurrentStep", 1)) return 1;
    flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
    if (check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1)) return 1;
    flag = ARKStepGetNumLinIters(arkode_mem, &nli);
    if (check_flag(&flag, "ARKStepGetNumLinIters", 1)) return 1;

    /* print current solution stats */
    iout++;
    printf(" %4i  %19.15"ESYM"  %19.15"ESYM"  %19.15"ESYM"  %li   %2li  %3li\n",
           iout, olddt, newdt, SUNRsqrt(N_VDotProd(y,y)/udata->N), (long int) udata->N, nni, nli);
    nni_tot += nni;
    nli_tot += nli;

    /* output results and current mesh to disk */
    N_VCopyFromDevice_OpenMPDEV(y);
    data = N_VGetHostArrayPointer_OpenMPDEV(y);
    for (i=0; i<udata->N; i++)  fprintf(UFID," %.16"ESYM, data[i]);
    fprintf(UFID,"\n");
    for (i=0; i<udata->N; i++)  fprintf(XFID," %.16"ESYM, udata->x_host[i]);
    fprintf(XFID,"\n");

    /* adapt the spatial mesh */
    xnew_host = adapt_mesh(y, &Nnew, udata);
    if (check_flag(xnew_host, "ark_adapt", 0)) return 1;

    /* create N_Vector of new length */
    y2 = N_VNew_OpenMPDEV(Nnew);
    if (check_flag((void *) y2, "N_VNew_OpenMPDEV", 0)) return 1;

    /* copy new mesh from host array to device array */
    xnew_dev = omp_target_alloc(Nnew * sizeof(realtype), dev);
    omp_target_memcpy(xnew_dev, xnew_host, Nnew*sizeof(realtype), 0, 0, dev, host);

    /* project solution onto new mesh */
    flag = project(udata->N, udata->x_dev, y, Nnew, xnew_dev, y2);
    if (check_flag(&flag, "project", 1)) return 1;

    /* delete old vector, old mesh */
    N_VDestroy(y);
    free(udata->x_host);
    omp_target_free(udata->x_dev, dev);

    /* swap x and xnew so that new mesh is stored in udata structure */
    udata->x_host = xnew_host;
    xnew_host = NULL;
    udata->N = Nnew;   /* store size of new mesh */
    udata->x_dev = xnew_dev;
    xnew_dev = NULL;

    /* swap y and y2 so that y holds new solution */
    yt = y;
    y = y2;
    y2 = yt;

    /* call ARKStepResize to notify integrator of change in mesh */
    flag = ARKStepResize(arkode_mem, y, hscale, t, NULL, NULL);
    if (check_flag(&flag, "ARKStepResize", 1)) return 1;

    /* destroy and re-allocate linear solver memory; reattach to ARKStep interface */
    /* NOTE(review): max PCG iterations uses the initial N (21), not the
       current mesh size Nnew -- confirm this is intended. */
    SUNLinSolFree(LS);
    LS = SUNLinSol_PCG(y, 0, (int) N);
    if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;
    flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL);
    if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
    flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac);
    if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;
  }
  printf(" ----------------------------------------------------------------------------------------\n");

  /* print some final statistics */
  printf(" Final solver statistics:\n");
  printf("   Total number of time steps = %i\n", iout);
  printf("   Total nonlinear iterations = %li\n", nni_tot);
  printf("   Total linear iterations    = %li\n\n", nli_tot);

  /* Clean up and return with successful completion */
  fclose(UFID);
  fclose(XFID);
  N_VDestroy(y);        /* Free vectors */
  free(udata->x_host);  /* Free user data */
  omp_target_free(udata->x_dev, dev);
  free(udata);
  ARKStepFree(&arkode_mem);  /* Free integrator memory */
  SUNLinSolFree(LS);         /* Free linear solver */
  return
0; } /*-------------------------------- * Functions called by the solver *--------------------------------*/ /* f routine to compute the ODE RHS function f(t,y). */ static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data) { UserData udata = (UserData) user_data; /* access problem data */ sunindextype N = udata->N; /* set variable shortcuts */ realtype k = udata->k; realtype *x = udata->x_dev; realtype *Y=NULL, *Ydot=NULL; realtype dxL, dxR; sunindextype i; int dev; dev = omp_get_default_device(); /* access data arrays */ Y = N_VGetDeviceArrayPointer_OpenMPDEV(y); if (check_flag((void *) Y, "N_VGetDeviceArrayPointer", 0)) return 1; Ydot = N_VGetDeviceArrayPointer_OpenMPDEV(ydot); if (check_flag((void *) Ydot, "N_VGetDeviceArrayPointer", 0)) return 1; /* Initialize ydot to zero - also handles boundary conditions */ N_VConst(ZERO, ydot); /* iterate over domain interior, computing all equations */ #pragma omp target map(to:N) is_device_ptr(x, Ydot, Y) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i=1; i<N-1; i++) { /* interior */ dxL = x[i]-x[i-1]; dxR = x[i+1]-x[i]; Ydot[i] = Y[i-1]*k*TWO/(dxL*(dxL+dxR)) - Y[i]*k*TWO/(dxL*dxR) + Y[i+1]*k*TWO/(dxR*(dxL+dxR)) + TWO*SUNRexp(-TWOHUNDRED*(x[i]-PT25)*(x[i]-PT25)) /* source term */ - SUNRexp(-FOURHUNDRED*(x[i]-PT7)*(x[i]-PT7)) + SUNRexp(-FIVEHUNDRED*(x[i]-PT4)*(x[i]-PT4)) - TWO*SUNRexp(-SIXHUNDRED*(x[i]-PT55)*(x[i]-PT55)); } return 0; /* Return with success */ } /* Jacobian routine to compute J(t,y) = df/dy. 
*/ static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y, N_Vector fy, void *user_data, N_Vector tmp) { UserData udata = (UserData) user_data; /* variable shortcuts */ sunindextype N = udata->N; realtype k = udata->k; realtype *x = udata->x_dev; realtype *V=NULL, *JV=NULL; realtype dxL, dxR; sunindextype i; int dev; dev = omp_get_default_device(); /* access data arrays */ V = N_VGetDeviceArrayPointer_OpenMPDEV(v); if (check_flag((void *) V, "N_VGetDeviceArrayPointer", 0)) return 1; JV = N_VGetDeviceArrayPointer_OpenMPDEV(Jv); if (check_flag((void *) JV, "N_VGetDeviceArrayPointer", 0)) return 1; /* initialize Jv product to zero - also handles boundary conditions */ N_VConst(ZERO, Jv); /* iterate over domain, computing all Jacobian-vector products */ #pragma omp target map(to:N) is_device_ptr(x, JV, V) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i=1; i<N-1; i++) { dxL = x[i]-x[i-1]; dxR = x[i+1]-x[i]; JV[i] = V[i-1]*k*TWO/(dxL*(dxL+dxR)) - V[i]*k*TWO/(dxL*dxR) + V[i+1]*k*TWO/(dxR*(dxL+dxR)); } return 0; /* Return with success */ } /*------------------------------- * Private helper functions *-------------------------------*/ /* Adapts the current mesh, using a simple adaptivity strategy of refining when an approximation of the scaled second-derivative is too large. We only do this in one sweep, so no attempt is made to ensure the resulting mesh meets these same criteria after adaptivity: y [input] -- the current solution vector Nnew [output] -- the size of the new mesh udata [input] -- the current system information The return for this function is a pointer to the new mesh. 
*/ realtype* adapt_mesh(N_Vector y, sunindextype *Nnew, UserData udata) { sunindextype i, j; int *marks=NULL; realtype ydd, *xold=NULL, *Y=NULL, *xnew=NULL; sunindextype num_refine, N_new; /* Access current solution and mesh arrays */ xold = udata->x_host; Y = N_VGetHostArrayPointer_OpenMPDEV(y); /* assumes copy to host already done */ if (check_flag((void *) Y, "N_VGetHostArrayPointer_OpenMPDEV", 0)) return NULL; /* create marking array */ marks = calloc(udata->N-1, sizeof(int)); /* perform marking: 0 -> leave alone 1 -> refine */ for (i=1; i<udata->N-1; i++) { /* approximate scaled second-derivative */ ydd = Y[i-1] - TWO*Y[i] + Y[i+1]; /* check for refinement */ if (fabs(ydd) > udata->refine_tol) { marks[i-1] = 1; marks[i] = 1; } } /* allocate new mesh */ num_refine = 0; for (i=0; i<udata->N-1; i++) if (marks[i] == 1) num_refine++; N_new = udata->N + num_refine; *Nnew = N_new; /* Store new array length */ xnew = malloc((N_new) * sizeof(realtype)); /* fill new mesh */ xnew[0] = xold[0]; /* store endpoints */ xnew[N_new-1] = xold[udata->N-1]; j=1; /* iterate over old intervals */ for (i=0; i<udata->N-1; i++) { /* if mark is 0, reuse old interval */ if (marks[i] == 0) { xnew[j++] = xold[i+1]; continue; } /* if mark is 1, refine old interval */ if (marks[i] == 1) { xnew[j++] = PT5*(xold[i]+xold[i+1]); xnew[j++] = xold[i+1]; continue; } } /* verify that new mesh is legal */ for (i=0; i<N_new-1; i++) { if (xnew[i+1] <= xnew[i]) { fprintf(stderr,"adapt_mesh error: illegal mesh created\n"); free(xnew); return NULL; } } free(marks); /* Delete marking array */ return xnew; /* Return with success */ } /* Projects one vector onto another: Nold [input] -- the size of the old mesh xold [input] -- the old mesh yold [input] -- the vector defined over the old mesh Nnew [input] -- the size of the new mesh xnew [input] -- the new mesh ynew [output] -- the vector defined over the new mesh (allocated prior to calling project) */ static int project(sunindextype Nold, realtype *xold, 
N_Vector yold, sunindextype Nnew, realtype *xnew, N_Vector ynew) { sunindextype iv, i, j; realtype *Yold=NULL, *Ynew=NULL; int dev = omp_get_default_device(); /* Access data arrays */ Yold = N_VGetDeviceArrayPointer_OpenMPDEV(yold); /* access data arrays */ if (check_flag((void *) Yold, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; Ynew = N_VGetDeviceArrayPointer_OpenMPDEV(ynew); if (check_flag((void *) Ynew, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; /* loop over new mesh, finding corresponding interval within old mesh, and perform piecewise linear interpolation from yold to ynew */ iv=0; #pragma omp target map(to:iv) is_device_ptr(Yold,Ynew,xnew,xold) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) { for (i=0; i<Nnew; i++) { /* find old interval, start with previous value since sorted */ for (j=iv; j<Nold-1; j++) { if (xnew[i] >= xold[j] && xnew[i] <= xold[j+1]) { iv = j; break; } iv = Nold-1; /* just in case it wasn't found above */ } /* perform interpolation */ Ynew[i] = Yold[iv]*(xnew[i]-xold[iv+1])/(xold[iv]-xold[iv+1]) + Yold[iv+1]*(xnew[i]-xold[iv])/(xold[iv+1]-xold[iv]); } } return 0; /* Return with success */ } /* Check function return value... 
opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns a flag so check if flag >= 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_flag(void *flagvalue, const char *funcname, int opt) { int *errflag; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && flagvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return 1; } /* Check if flag < 0 */ else if (opt == 1) { errflag = (int *) flagvalue; if (*errflag < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n", funcname, *errflag); return 1; }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && flagvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return 1; } return 0; } /*---- end of file ----*/
trmv_x_csc_n_lo_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <memory.h>

/* Serial kernel for columns [lrs, lre): y[c] = beta*y[c] + alpha*(L^T x)[c],
 * where L is the lower triangle (row index >= column index) of the CSC
 * matrix A.  The inner product over each column is 4-way unrolled; entries
 * are assumed sorted by row index within a column. */
static alphasparse_status_t trmv_csc_n_lo_trans_unroll4(const ALPHA_Number alpha,
                                                        const ALPHA_SPMAT_CSC* A,
                                                        const ALPHA_Number* x,
                                                        const ALPHA_Number beta,
                                                        ALPHA_Number* y,
                                                        ALPHA_INT lrs,
                                                        ALPHA_INT lre)
{
    ALPHA_INT m = A->cols;   /* retained for parity with the original (unused) */

    ALPHA_INT col;
    for (col = lrs; col < lre; col++) {
        /* four independent accumulators to break dependency chains */
        register ALPHA_Number acc0, acc1, acc2, acc3;
        alpha_setzero(acc0);
        alpha_setzero(acc1);
        alpha_setzero(acc2);
        alpha_setzero(acc3);

        const ALPHA_INT begin = A->cols_start[col];
        const ALPHA_INT end   = A->cols_end[col];
        const ALPHA_INT len   = end - begin;
        ALPHA_Number *val = &A->values[begin];
        ALPHA_INT *row    = &A->row_indx[begin];

        /* unrolled sweep: find the first lane whose row index reaches the
         * diagonal, then accumulate that lane and all following lanes
         * (rows are sorted, so later lanes are in the lower triangle too) */
        ALPHA_INT p = 0;
        while (p < len - 4) {
            const ALPHA_INT r0 = row[p];
            const ALPHA_INT r1 = row[p + 1];
            const ALPHA_INT r2 = row[p + 2];
            const ALPHA_INT r3 = row[p + 3];
            int first = 4;               /* 4 == all lanes above the diagonal */
            if (r0 >= col)      first = 0;
            else if (r1 >= col) first = 1;
            else if (r2 >= col) first = 2;
            else if (r3 >= col) first = 3;
            switch (first) {
            case 0: alpha_madde(acc0, val[p],     x[r0]); /* fallthrough */
            case 1: alpha_madde(acc1, val[p + 1], x[r1]); /* fallthrough */
            case 2: alpha_madde(acc2, val[p + 2], x[r2]); /* fallthrough */
            case 3: alpha_madde(acc3, val[p + 3], x[r3]); /* fallthrough */
            default: break;
            }
            p += 4;
        }

        /* remainder sweep, one entry at a time */
        for (; p < len; p++) {
            if (row[p] >= col) {
                alpha_madde(acc0, val[p], x[row[p]]);
            }
        }

        /* reduce the four lanes, then apply alpha and beta */
        alpha_add(acc0, acc0, acc1);
        alpha_add(acc2, acc2, acc3);
        alpha_add(acc0, acc0, acc2);
        alpha_mul(acc0, acc0, alpha);
        alpha_mul(acc1, beta, y[col]);
        alpha_add(y[col], acc0, acc1);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Parallel driver: partition the columns by nonzero count so each thread
 * gets a balanced share, then run the serial kernel on each slice.  Column
 * slices write disjoint ranges of y, so no synchronization is needed. */
static alphasparse_status_t trmv_csc_n_lo_trans_omp(const ALPHA_Number alpha,
                                                    const ALPHA_SPMAT_CSC* A,
                                                    const ALPHA_Number* x,
                                                    const ALPHA_Number beta,
                                                    ALPHA_Number* y)
{
    const ALPHA_INT ncol = A->cols;
    const ALPHA_INT nthreads = alpha_get_thread_num();
    ALPHA_INT partition[nthreads + 1];
    balanced_partition_row_by_nnz(A->cols_end, ncol, nthreads, partition);

#ifdef _OPENMP
#pragma omp parallel num_threads(nthreads)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        trmv_csc_n_lo_trans_unroll4(alpha, A, x, beta, y,
                                    partition[tid], partition[tid + 1]);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point: y = alpha * op(tril(A))^T * x + beta * y */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return trmv_csc_n_lo_trans_omp(alpha, A, x, beta, y);
}
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isle_int32 // A.*B function (eWiseMult): GB_AemultB__isle_int32 // A*D function (colscale): GB_AxD__isle_int32 // D*A function (rowscale): GB_DxB__isle_int32 // C+=B function (dense accum): GB_Cdense_accumB__isle_int32 // C+=b function (dense accum): GB_Cdense_accumb__isle_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int32 // C=scalar+B GB_bind1st__isle_int32 // C=scalar+B' GB_bind1st_tran__isle_int32 // C=A+scalar GB_bind2nd__isle_int32 // C=A'+scalar GB_bind2nd_tran__isle_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isle_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isle_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isle_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isle_int32 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isle_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isle_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isle_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isle_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isle_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] 
; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB_bind1st_tran__isle_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB_bind2nd_tran__isle_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ft_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

/* Lattice-summed Fourier transforms of AO pair products for periodic
 * systems.  NOTE(review): high-level purpose inferred from the PBC_ft_*
 * naming and the expkL phase contraction -- confirm against gto/ft_ao.h. */

#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <assert.h>
#include "config.h"
#include "cint.h"
#include "gto/ft_ao.h"
#include "vhf/fblas.h"

#define INTBUFMAX       16000   /* integral buffer budget (elements per batch) */
#define IMGBLK          80      /* number of lattice images processed per zgemm */
#define OF_CMPLX        2
#define MIN(X,Y)        ((X)<(Y)?(X):(Y))
#define MAX(X,Y)        ((X)>(Y)?(X):(Y))

int PBCsizeof_env(int *shls_slice,
                  int *atm, int natm, int *bas, int nbas, double *env);

/* Translate the 3 coordinates stored at env_loc[ptr..ptr+2] by lattice
 * vector Ls[iL]; env holds the untranslated reference coordinates. */
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
        env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0];
        env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1];
        env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2];
}

/*
 * Multiple k-points: for shell pair (ish, jsh), loop over G-vector blocks;
 * within each block evaluate the FT integrals for batches of IMGBLK lattice
 * images (bufL), then contract the image axis with the Bloch phase factors
 * expkL (an nimgs x nkpts matrix) into the per-k accumulator bufk via zgemm.
 * fsort finally scatters bufk into the output layout.
 */
static void _ft_fill_k(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                       void (*fsort)(),
                       double complex *out, int nkpts, int comp, int nimgs,
                       int blksize, int ish, int jsh,
                       double complex *buf, double *env_loc, double *Ls,
                       double complex *expkL, int *shls_slice, int *ao_loc,
                       double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        ish += ish0;    /* convert relative shell ids to absolute */
        jsh += jsh0;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const char TRANS_N = 'N';
        const double complex Z1 = 1;
        /* offset of jsh's atom coordinates in env (this atom gets shifted
         * by each lattice vector below) */
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        double complex *bufk = buf;                             /* nkpts accumulator */
        double complex *bufL = buf + dij*blksize * comp * nkpts; /* per-image batch */
        double complex *pbuf;
        int gs0, gs1, dg, dijg, empty;  /* NOTE(review): `empty` is unused */
        int jL0, jLcount, jL;
        int i;

        for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
                gs1 = MIN(gs0+blksize, nGv);
                dg = gs1 - gs0;
                dijg = dij * dg * comp;
                for (i = 0; i < dijg*nkpts; i++) {
                        bufk[i] = 0;
                }

                for (jL0 = 0; jL0 < nimgs; jL0 += IMGBLK) {
                        jLcount = MIN(IMGBLK, nimgs-jL0);
                        pbuf = bufL;
                        for (jL = jL0; jL < jL0+jLcount; jL++) {
                                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                /* intor returns nonzero when the batch is
                                 * non-vanishing; otherwise zero-fill pbuf */
                                if ((*intor)(pbuf, shls, dims, eval_aopair,
                                             eval_gz, Z1, sGv, b, sgxyz, gs, dg,
                                             atm, natm, bas, nbas, env_loc)) {
                                } else {
                                        for (i = 0; i < dijg; i++) {
                                                pbuf[i] = 0;
                                        }
                                }
                                pbuf += dijg;
                        }
                        /* bufk[:,k] += bufL[:,img] * expkL[img,k] over this
                         * image block; LDB is nimgs (expkL row stride) */
                        zgemm_(&TRANS_N, &TRANS_N, &dijg, &nkpts, &jLcount,
                               &Z1, bufL, &dijg, expkL+jL0, &nimgs,
                               &Z1, bufk, &dijg);
                }

                (*fsort)(out, bufk, shls_slice, ao_loc, nkpts, comp,
                         nGv, ish, jsh, gs0, gs1);

                /* sGv/sgxyz are pre-blocked by subgroupGv: advance to the
                 * next block's contiguous (3 x dg) slice */
                sGv += dg * 3;
                if (sgxyz != NULL) {
                        sgxyz += dg * 3;
                }
        }
}

/*
 * Single k-point: same structure as _ft_fill_k, but the phase contraction
 * degenerates to a scalar multiply per image (expkL[jL] is passed as the
 * integral prefactor), so images are accumulated directly into bufk.
 */
static void _ft_fill_nk1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                         void (*fsort)(),
                         double complex *out, int nkpts, int comp, int nimgs,
                         int blksize, int ish, int jsh,
                         double complex *buf, double *env_loc, double *Ls,
                         double complex *expkL, int *shls_slice, int *ao_loc,
                         double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        double complex *bufk = buf;
        double complex *bufL = buf + dij*blksize * comp;
        int gs0, gs1, dg, jL, i;
        size_t dijg;

        for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
                gs1 = MIN(gs0+blksize, nGv);
                dg = gs1 - gs0;
                dijg = dij * dg * comp;
                for (i = 0; i < dijg; i++) {
                        bufk[i] = 0;
                }

                for (jL = 0; jL < nimgs; jL++) {
                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                        /* vanishing batches are simply skipped here */
                        if ((*intor)(bufL, shls, dims, eval_aopair, eval_gz,
                                     expkL[jL], sGv, b, sgxyz, gs, dg,
                                     atm, natm, bas, nbas, env_loc)) {
                                for (i = 0; i < dijg; i++) {
                                        bufk[i] += bufL[i];
                                }
                        }
                }

                (*fsort)(out, bufk, shls_slice, ao_loc, nkpts, comp,
                         nGv, ish, jsh, gs0, gs1);

                sGv += dg * 3;
                if (sgxyz != NULL) {
                        sgxyz += dg * 3;
                }
        }
}

/* Scatter a (di x dj x dg) integral tile into the dense s1 (no symmetry)
 * output layout out[i, j, G] for every k-point and component. */
static void sort_s1(double complex *out, double complex *in,
                    int *shls_slice, int *ao_loc, int nkpts, int comp,
                    int nGv, int ish, int jsh, int gs0, int gs1)
{
        const size_t NGv = nGv;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nijg = naoi * naoj * NGv;  /* stride per (k, comp) slab */
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dg = gs1 - gs0;
        const size_t dijg = di * dj * dg;       /* tile size per (k, comp) */
        out += (ip * naoj + jp) * NGv + gs0;

        int i, j, n, ic, kk;
        double complex *pin, *pout;
        for (kk = 0; kk < nkpts; kk++) {
                for (ic = 0; ic < comp; ic++) {
                        for (j = 0; j < dj; j++) {
                                for (i = 0; i < di; i++) {
                                        /* in is (j*di+i)-major: transpose to
                                         * row-major (i, j) on output */
                                        pout = out + (i*naoj+j) * NGv;
                                        pin = in + (j*di+i) * dg;
                                        for (n = 0; n < dg; n++) {
                                                pout[n] = pin[n];
                                        }
                                }
                        }
                        out += nijg;
                        in += dijg;
                }
        }
}

/* Scatter a tile into the s2 (i>=j lower-triangular packed) layout for the
 * strictly i > j shell-pair case: full di x dj rectangle is stored. */
static void sort_s2_igtj(double complex *out, double complex *in,
                         int *shls_slice, int *ao_loc, int nkpts, int comp,
                         int nGv, int ish, int jsh, int gs0, int gs1)
{
        const size_t NGv = nGv;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        /* offsets in the packed triangular index i*(i+1)/2 + j */
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijg = nij * NGv;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int dg = gs1 - gs0;
        const size_t dijg = dij * dg;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
        const int ip1 = ao_loc[ish] + 1;

        int i, j, n, ic, kk;
        double complex *pin, *pout;
        for (kk = 0; kk < nkpts; kk++) {
                for (ic = 0; ic < comp; ic++) {
                        pout = out;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        pin = in + (j*di+i) * dg;
                                        for (n = 0; n < dg; n++) {
                                                pout[j*NGv+n] = pin[n];
                                        }
                                }
                                /* row i+1 of the triangle starts (ip1+i)
                                 * packed elements further on */
                                pout += (ip1 + i) * NGv;
                        }
                        out += nijg;
                        in += dijg;
                }
        }
}

/* Same as sort_s2_igtj but for the diagonal shell pair (ish == jsh):
 * only the j <= i lower triangle of the tile is stored. */
static void sort_s2_ieqj(double complex *out, double complex *in,
                         int *shls_slice, int *ao_loc, int nkpts, int comp,
                         int nGv, int ish, int jsh, int gs0, int gs1)
{
        const size_t NGv = nGv;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijg = nij * NGv;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int dg = gs1 - gs0;
        const size_t dijg = dij * dg;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
        const int ip1 = ao_loc[ish] + 1;

        int i, j, n, ic, kk;
        double complex *pin, *pout;
        for (kk = 0; kk < nkpts; kk++) {
                for (ic = 0; ic < comp; ic++) {
                        pout = out;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        pin = in + (j*di+i) * dg;
                                        for (n = 0; n < dg; n++) {
                                                pout[j*NGv+n] = pin[n];
                                        }
                                }
                                pout += (ip1 + i) * NGv;
                        }
                        out += nijg;
                        in += dijg;
                }
        }
}

/* Multi-k fill, s1 (no permutation symmetry) output layout. */
void PBC_ft_fill_ks1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                     double complex *out, int nkpts, int comp, int nimgs,
                     int blksize, int ish, int jsh,
                     double complex *buf, double *env_loc, double *Ls,
                     double complex *expkL, int *shls_slice, int *ao_loc,
                     double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        _ft_fill_k(intor, eval_aopair, eval_gz, &sort_s1,
                   out, nkpts, comp, nimgs, blksize, ish, jsh,
                   buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                   sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}

/* Multi-k fill, s2 (packed i>=j) output layout; pairs with ip < jp are
 * skipped.  NOTE(review): jp subtracts nbas -- j shells appear to index a
 * concatenated (cell + supmol) basis; confirm against the Python caller. */
void PBC_ft_fill_ks2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                     double complex *out, int nkpts, int comp, int nimgs,
                     int blksize, int ish, int jsh,
                     double complex *buf, double *env_loc, double *Ls,
                     double complex *expkL, int *shls_slice, int *ao_loc,
                     double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip > jp) {
                _ft_fill_k(intor, eval_aopair, eval_gz, &sort_s2_igtj,
                           out, nkpts, comp, nimgs, blksize, ish, jsh,
                           buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                           sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
        } else if (ip == jp) {
                _ft_fill_k(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
                           out, nkpts, comp, nimgs, blksize, ish, jsh,
                           buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                           sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
        }
}

/* Single-k fill, s1 layout. */
void PBC_ft_fill_nk1s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                       double complex *out, int nkpts, int comp, int nimgs,
                       int blksize, int ish, int jsh,
                       double complex *buf, double *env_loc, double *Ls,
                       double complex *expkL, int *shls_slice, int *ao_loc,
                       double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        _ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
                     out, nkpts, comp, nimgs, blksize, ish, jsh,
                     buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                     sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}

/* Single-k fill, s1 layout, hermitian case: only ip >= jp pairs are
 * computed; the caller reconstructs the upper triangle by symmetry. */
void PBC_ft_fill_nk1s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                            double complex *out, int nkpts, int comp, int nimgs,
                            int blksize, int ish, int jsh,
                            double complex *buf, double *env_loc, double *Ls,
                            double complex *expkL, int *shls_slice, int *ao_loc,
                            double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                            int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip >= jp) {
                _ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
                             out, nkpts, comp, nimgs, blksize, ish, jsh,
                             buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                             sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
        }
}

/* Single-k fill, s2 (packed i>=j) layout. */
void PBC_ft_fill_nk1s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
                       double complex *out, int nkpts, int comp, int nimgs,
                       int blksize, int ish, int jsh,
                       double complex *buf, double *env_loc, double *Ls,
                       double complex *expkL, int *shls_slice, int *ao_loc,
                       double *sGv, double *b, int *sgxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int ip = ish + shls_slice[0];
        int jp = jsh + shls_slice[2] - nbas;
        if (ip > jp) {
                _ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s2_igtj,
                             out, nkpts, comp, nimgs, blksize, ish, jsh,
                             buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                             sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
        } else if (ip == jp) {
                _ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
                             out, nkpts, comp, nimgs, blksize, ish, jsh,
                             buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                             sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
        }
}

/* Repack the G-vectors (and optional integer grid indices gxyz) from the
 * global column-major (3 x nGv) layout into contiguous per-block slices of
 * size (3 x dg), so the fill routines can walk them with a simple pointer
 * bump.  The block size is chosen so a (dimax*djmax) x blksize tile fits in
 * `bufsize`, rounded down to a multiple of 8.  Returns the block size. */
static int subgroupGv(double *sGv, int *sgxyz, double *Gv, int *gxyz, int nGv,
                      int bufsize, int *shls_slice, int *ao_loc,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int i;
        int dimax = 0;
        int djmax = 0;
        for (i = shls_slice[0]; i < shls_slice[1]; i++) {
                dimax = MAX(dimax, ao_loc[i+1]-ao_loc[i]);
        }
        for (i = shls_slice[2]; i < shls_slice[3]; i++) {
                djmax = MAX(djmax, ao_loc[i+1]-ao_loc[i]);
        }
        int dij = dimax * djmax;
        int gblksize = 0xfffffff8 & (bufsize / dij);  /* round down to 8 */

        int gs0, dg;
        for (gs0 = 0; gs0 < nGv; gs0 += gblksize) {
                dg = MIN(nGv-gs0, gblksize);
                for (i = 0; i < 3; i++) {
                        memcpy(sGv+dg*i, Gv+nGv*i+gs0, sizeof(double)*dg);
                }
                sGv += dg * 3;
                if (gxyz != NULL) {
                        for (i = 0; i < 3; i++) {
                                memcpy(sgxyz+dg*i, gxyz+nGv*i+gs0, sizeof(int)*dg);
                        }
                        sgxyz += dg * 3;
                }
        }
        return gblksize;
}

/* Driver: prepare blocked G-vector copies, select the pair evaluator, then
 * fill all (ish, jsh) shell pairs in parallel, each thread owning a private
 * env copy (for shift_bas) and a private integral buffer.
 * NOTE(review): the malloc results (sGv, sgxyz, env_loc, buf) are not
 * checked -- an allocation failure would crash rather than report. */
void PBC_ft_latsum_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
                       double complex *out, int nkpts, int comp, int nimgs,
                       double *Ls, double complex *expkL,
                       int *shls_slice, int *ao_loc,
                       double *Gv, double *b, int *gxyz, int *gs, int nGv,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        double *sGv = malloc(sizeof(double) * nGv * 3);
        int *sgxyz = NULL;
        if (gxyz != NULL) {
                sgxyz = malloc(sizeof(int) * nGv * 3);
        }
        int blksize;
        /* single-k fills have no per-k buffer, so they can afford larger
         * G blocks (INTBUFMAX*IMGBLK/2) than the multi-k path */
        if (fill == &PBC_ft_fill_nk1s1 || fill == &PBC_ft_fill_nk1s2 ||
            fill == &PBC_ft_fill_nk1s1hermi) {
                blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX*IMGBLK/2,
                                     shls_slice, ao_loc, atm, natm, bas, nbas, env);
        } else {
                blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX,
                                     shls_slice, ao_loc, atm, natm, bas, nbas, env);
        }

        /* plain overlap FT has a fast built-in path; anything else goes
         * through the lazy-contraction pair evaluator */
        int (*eval_aopair)() = NULL;
        if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) {
                eval_aopair = &GTO_aopair_lazy_contract;
        }

#pragma omp parallel default(none) \
        shared(intor, eval_aopair, eval_gz, fill, out, nkpts, comp, nimgs, \
               Ls, expkL, shls_slice, ao_loc, sGv, b, sgxyz, gs, nGv,\
               atm, natm, bas, nbas, env, blksize)
{
        int i, j, ij;
        /* private env copy sized for both shell slices */
        int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
        nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
        double *env_loc = malloc(sizeof(double)*nenv);
        memcpy(env_loc, env, sizeof(double)*nenv);
        /* buffer: nkpts accumulator slabs + IMGBLK image slabs */
        size_t count = nkpts + IMGBLK;
        double complex *buf = malloc(sizeof(double complex)*count*INTBUFMAX*comp);
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                i = ij / njsh;
                j = ij % njsh;
                (*fill)(intor, eval_aopair, eval_gz,
                        out, nkpts, comp, nimgs, blksize, i, j,
                        buf, env_loc, Ls, expkL, shls_slice, ao_loc,
                        sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        free(sGv);
        if (sgxyz != NULL) {
                free(sgxyz);
        }
}
GB_binop__bxor_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function body here is a thin, type-specialized
// instantiation of a shared template brought in via #include; the macros
// below parameterize those templates for the BXOR operator on uint32_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxor_uint32)
// A*D function (colscale):         GB (_AxD__bxor_uint32)
// D*A function (rowscale):         GB (_DxB__bxor_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__bxor_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxor_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxor_uint32)
// C=scalar+B                       GB (_bind1st__bxor_uint32)
// C=scalar+B'                      GB (_bind1st_tran__bxor_uint32)
// C=A+scalar                       GB (_bind2nd__bxor_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__bxor_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) ^ (bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) ^ (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BXOR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXOR is none of these, so no accumulating dense ewise3 kernel is emitted.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x) ^ (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__bxor_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij) ^ (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
init.c
/* This file contains functions to initialize arrays and data structures
   used in the pf3d kernels. */

#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mytypes.h"
#include "runparm.h"
#include "lecuyer.h"
#include "util.h"
#include "time.h"
#include "light.h"
#include "pf3d_fft.h"
#include "pf3dbench.h"
#include "pf3dbenchvars.h"
#include "check.h"

/* wave-amplitude ramp limits used to fill the light-wave arrays */
double t0_lo, t2_lo, t0_hi, t2_hi;
double denlw_lo, denlw_hi;
long nbig;
rcomplex *t0_big, *t0_big_sav;

/* tN_new has no guard zones but tvar has one in x and y. */
#define tN_new(a,b,c) tN_new[CELTNDX3(a,b,c)]
#define tvar(a,b,c) tvar[CELTNDX(a,b,c)]
#define tvar_sav(a,b,c) tvar_sav[CELTNDX(a,b,c)]
#define t0_sav(a,b,c) t0_sav[CELTNDX(a,b,c)]
#define t2_sav(a,b,c) t2_sav[CELTNDX(a,b,c)]

/* free() wrapper tolerant of NULL (free(NULL) is itself a no-op; the
   guard is redundant but harmless) */
void safe_free(void *var)
{
  if(var) free(var);
}

/* Allocate a light-wave array (nxl+1 by nyl+1 by nzl, i.e. with guard
   zones in x and y) filled with a complex ramp between tvar_lo and
   tvar_hi.  NOTE(review): ii/i/j/k/ibase/jbase are unused leftovers. */
rcomplex *make_wave(double tvar_lo, double tvar_hi)
{
  int ii, i, j, k, ibase, jbase;
  rcomplex *tvar;

  tvar= init_complex_ramp(nxl+1, nyl+1, nzl, tvar_lo, tvar_hi);
  return tvar;
}

void free_wave(rcomplex *tvar)
{
  safe_free(tvar);
}

/* Copy tvar_sav into tvar (first argument is the destination).  The
   orphaned "omp for" becomes a worksharing loop only when this is called
   from inside a parallel region; otherwise it runs serially. */
void copy_wave(rcomplex *tvar, rcomplex *tvar_sav)
{
  int ix, iy, iz;

#ifdef _OPENMP
#pragma omp for private(ix, iy, iz)
#endif
  for(iz= 0; iz < nzl; iz++) {
    for (iy=0; iy<nyl; iy++) {
      for (ix=0; ix<nxl; ix++) {
        tvar(ix, iy, iz)= tvar_sav(ix, iy, iz);
      }
    }
  }
}

/* Copy a guarded wave array into the unguarded tN_new layout. */
void copy_to_tN(rcomplex * restrict tvar, rcomplex * restrict tN_new)
{
  int ix, iy, iz;

#ifdef _OPENMP
#pragma omp for private(ix, iy, iz)
#endif
  for(iz= 0; iz < nzl; iz++) {
    for (iy=0; iy<nyl; iy++) {
      for (ix=0; ix<nxl; ix++) {
        tN_new(ix, iy, iz)= tvar(ix, iy, iz);
      }
    }
  }
}

/* Copy the unguarded tN_new layout back into a guarded wave array. */
void copy_from_tN(rcomplex * restrict tN_new, rcomplex * restrict tvar)
{
  int ix, iy, iz;

#ifdef _OPENMP
#pragma omp for private(ix, iy, iz)
#endif
  for(iz= 0; iz < nzl; iz++) {
    for (iy=0; iy<nyl; iy++) {
      for (ix=0; ix<nxl; ix++) {
        tvar(ix, iy, iz)= tN_new(ix, iy, iz);
      }
    }
  }
}

/* Copy a 2D (one plane, nplng elements) real array. */
void copy_arr2d(real *arr_old, real *arr_new)
{
  long i;

  for(i= 0; i < nplng; i++) {
    arr_new[i]= arr_old[i];
  }
}

/* Copy a full 3D (ngtot elements) real array. */
void copy_arr3d(real *arr_old, real *arr_new)
{
  long i;

  for(i= 0; i < ngtot; i++) {
    arr_new[i]= arr_old[i];
  }
}

/* Copy a full 3D (ngtot elements) complex array. */
void copy_carr3d(rcomplex *arr_old, rcomplex *arr_new)
{
  long i;

  for(i= 0; i < ngtot; i++) {
    arr_new[i]= arr_old[i];
  }
}

/* Set up grid sizes, thread counts, FFT buffers and the light-wave /
   density arrays for the benchmark.  Allocations go through wmalloc(),
   which presumably aborts on failure (TODO confirm) since no results
   are checked here. */
void do_init(int nxl_in, int nyl_in, int nzl_in, int nthr_in)
{
  int i, ntot, izz, isign;
  int idir;   /* +1=forward, -1=backward */
  long nmx;
  /* mfh data */
  int ii, jj, kk, ijk, maxfftlen;
  long nrzone;
  int nxp2, nyp2, nzp2, xyplane, xzplane, yzplane, buffer_size;
  double t0big, t2big;
  int ix, iy, iz;
  long memtot;

  /* set the maximum number of threads that can be used during this run */
#ifdef _OPENMP
  /* use the same number of threads for all functions */
  num_thr= nthr_in;
  if(num_thr > 0) omp_set_num_threads(num_thr);
  omp_maxthreads= omp_get_max_threads();
#else
  num_thr= 1;
  omp_maxthreads= 1;
#endif
  if(mp_rank == 0) printf("Running with a maximum of %d threads\n", omp_maxthreads);

  nxl= nxl_in;
  nyl= nyl_in;
  nzl= nzl_in;
  /* ramp limits for the two light waves and the low-frequency density */
  t0_lo= 0.25;
  t0_hi= 0.75;
  t2_lo= 3.3e-4;
  t2_hi= 6.5e-4;
  denlw_lo= 1.8e-4;
  denlw_hi= 2.8e-4;
  ngrd= 1;                      /* one guard zone on each side */
  nxa= nxl+ngrd+ngrd;
  nya= nyl+ngrd+ngrd;
  nza= nzl+ngrd+ngrd;
  nxg0= 1;
  nxg1= nxl;
  nyg0= 1;
  nyg1= nyl;
  nzg0= 1;
  nzg1= nzl;
  /* (2*nx, 2*ny, nz) is the total number of zones across all domains.
     (nxl, nyl, nzl) is the number of zones in a single domain.
     The grid is decomposed into (mp_p, mp_q, mp_r) domains.
     The only place that nx, ny are used is in 2D FFTs.
     The caller passes nxl by nyl arrays into an FFT and gets nxl by nyl
     results back.  Even code that operates in wavenumber space operates
     on nxl by nyl arrays. */
#ifdef USE_MPI
  nx= mp_p*nxl/2;
  ny= mp_q*nyl/2;
  nz= mp_r*nzl-1;
#else
  nx= nxl/2;
  ny= nyl/2;
  nz= nzl-1;
#endif
  isign= 1;
  idir= 1;
#if 0
  isubcycle= 50;
  dthyd= 0.208;
  dt= dthyd/isubcycle;
#endif
  dx= 12.57;
  dy= 12.57;
  dz= 20.17;
  lx= nx*dx;
  ly= ny*dy;
  lz= (nz+1)*dz;
  ntot= nxl*nyl*nzl;
  ngtot= nxa*nya*nza;
  nplng= nxa*nya;
  ntheta= 4*2*nplng;
  /* allocate space for random number generator before initializng any
     variables. */
  nmx= ngtot;
  if(nza > nmx) nmx= nza;
  if(nplng > nmx) nmx= nplng;
  nmaxpln= nxa*nya;
  if(nmaxpln < nxa*nza) nmaxpln= nxa*nza;
  if(nmaxpln < nya*nza) nmaxpln= nya*nza;
  tmp_dbcom= wmalloc(sizeof(double complex)*ngtot);
  nxp2 = nxl + 2 * ngrd+1;
  nyp2 = nyl + 2 * ngrd+1;
  nzp2 = nzl + 2 * ngrd + 1;
  xyplane = nxp2 * nyp2;
  xzplane = nxp2 * nzp2;
  yzplane = nyp2 * nzp2;
  /* In earlier versions of pF3D, FFT message passing only required a
     buffer big enough to hold one xy-plane.  The port to GPUs required
     operating on all xy-planes simultaneously, so now need to make a 3D
     temporary. */
  buffer_size = nxp2*nyp2*nzp2*sizeof(rcomplex);
  iptmp = wmalloc(buffer_size);
  optmp = wmalloc(buffer_size);
  afftbuf = wmalloc(buffer_size);
  sndbuf = wmalloc(buffer_size/2);
  rcvbuf = wmalloc(buffer_size/2);
  if(!mp_rank) printf("sndbuf is %d bytes\n", buffer_size/2);
  /* initialize GPU buffer arrays for use in 2D FFTs */
#ifdef OMP45_BUILD
  init_gpubuf(ngtot);
#endif
  /* thetb is planar in pF3D. Make it 3D here to enable more
     parallelism. */
  thetb= init_real(ngtot, 0.0, 0.523599);
  /* make planes with guard zones */
  theta= wmalloc(sizeof(real)*4*2*nplng);
  t0= make_wave(t0_lo, t0_hi);
  t0_sav= make_wave(t0_lo, t0_hi);
  t2= make_wave(t2_lo, t2_hi);
  t2_sav= make_wave(t2_lo, t2_hi);
  tN_new= make_wave(t0_lo, t0_hi);
  /* save the initial state of the light waves (first argument of
     copy_wave is the destination) */
  copy_wave(t0_sav, t0);
  copy_wave(t2_sav, t2);
  denlw= make_wave(denlw_lo, denlw_hi);
  if(mp_rank == 0) puts("variable initialization complete\n");
  if(mp_rank == 0) printf("total bytes allocated= %e\n", 1.0*totalloc);
}

/* Release everything allocated by do_init; pointers are zeroed so a
   second call is safe. */
void do_cleanup(void)
{
  safe_free(theta);
  if(t0) free_wave(t0);
  t0= 0;
  if(t2) free_wave(t2);
  t2= 0;
  if(t0_sav) free_wave(t0_sav);
  t0_sav= 0;
  if(t2_sav) free_wave(t2_sav);
  t2_sav= 0;
  if(tN_new) free_wave(tN_new);
  tN_new= 0;
}
StreamTriad_par6.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include "timer.h" int main(int argc, char *argv[]){ int nsize = 20000000, ntimes=16; double *a = omp_target_alloc(nsize*sizeof(double), omp_get_default_device()); double *b = omp_target_alloc(nsize*sizeof(double), omp_get_default_device()); double *c = omp_target_alloc(nsize*sizeof(double), omp_get_default_device()); struct timespec tstart; // initializing data and arrays double scalar = 3.0, time_sum = 0.0; #pragma omp target teams distribute parallel for simd is_device_ptr(a, b) for (int i=0; i<nsize; i++) { a[i] = 1.0; b[i] = 2.0; } for (int k=0; k<ntimes; k++){ cpu_timer_start(&tstart); // stream triad loop #pragma omp target teams distribute parallel for simd is_device_ptr(a, b, c) for (int i=0; i<nsize; i++){ c[i] = a[i] + scalar*b[i]; } time_sum += cpu_timer_stop(tstart); } printf("Average runtime for stream triad loop is %lf msecs\n", time_sum/ntimes); omp_target_free(a, omp_get_default_device()); omp_target_free(b, omp_get_default_device()); omp_target_free(c, omp_get_default_device()); return(0); }
dgemm.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

/* Reference BLAS DGEMM (Fortran, column-major): C = alpha*op(A)*op(B) + beta*C */
extern void dgemm_(char*, char*, int*, int*, int*, double*, double*, int*, double*, int*, double*, double*, int*);

/*
 * DGEMM throughput micro-benchmark.
 * Usage: prog [m] [n] [k] [loop_count]; defaults 1024 x 2048 x 512, 10 loops.
 * Returns 0 on success, 1 on bad arguments or allocation failure.
 */
int main(int argc, char* argv[])
{
    double alpha = 1.2;
    double beta = 1.0e-3;
    double gflop;
    double time_avg;
    int m = 1024;
    int n = 2048;
    int k = 512;
    int LOOP_COUNT = 10;
    int i;
    char ta='N';
    char tb='N';
    int rc = 1;                 /* pessimistic default for goto cleanup */

#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX sysconf (_SC_HOST_NAME_MAX)
#endif
    char hostname[HOST_NAME_MAX];
    gethostname(hostname, sizeof(hostname));

    if (argc >= 2) m = atoi(argv[1]);
    if (argc >= 3) n = atoi(argv[2]);
    if (argc >= 4) k = atoi(argv[3]);
    if (argc >= 5) LOOP_COUNT = atoi(argv[4]);
    if (m <= 0 || n <= 0 || k <= 0 || LOOP_COUNT <= 0) {
        fprintf(stderr, "%s: matrix dimensions and loop count must be positive integers\n", hostname);
        return 1;
    }

    /* BUG FIX: 'duration' used to be declared with the *default* value of
       LOOP_COUNT (10) before argv[4] was parsed, so a user-supplied count
       larger than 10 overflowed the array.  All per-iteration arrays are
       now sized after parsing, and heap-allocated to avoid a huge VLA. */
    struct timeval start_time, end_time;
    struct timeval *duration = malloc(sizeof *duration * LOOP_COUNT);
    double *perf    = malloc(sizeof *perf    * LOOP_COUNT);
    double *elapsed = malloc(sizeof *elapsed * LOOP_COUNT);

    /* (size_t) cast avoids int overflow in m*k for large problem sizes */
    double* A = malloc(sizeof(double)*(size_t)m*k);
    double* B = malloc(sizeof(double)*(size_t)k*n);
    double* C = malloc(sizeof(double)*(size_t)m*n);
    if (duration == NULL || perf == NULL || elapsed == NULL ||
        A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "%s: memory allocation failed\n", hostname);
        goto cleanup;
    }

    printf("%s: Size of Matrix A(mxk)\t\t:\t%d x %d\n", hostname, m, k);
    printf("%s: Size of Matrix B(kxn)\t\t:\t%d x %d\n", hostname, k, n);
    printf("%s: Size of Matrix C(mxn)\t\t:\t%d x %d\n", hostname, m, n);
    printf("%s: LOOP COUNT\t\t\t:\t%d \n", hostname, LOOP_COUNT);
    printf("\n");

    #pragma omp parallel for
    for (i=0; i<m*k ; ++i) A[i] = i%3+1;
    #pragma omp parallel for
    for (i=0; i<k*n ; ++i) B[i] = i%3+1;
    #pragma omp parallel for
    for (i=0; i<m*n ; ++i) C[i] = i%3+1;

    /* flops per DGEMM call: 2mnk multiply-adds plus 3mn for the C update */
    gflop = (2.0 * m * n * k + 3.0 * m * n) * 1E-9;

    /* CALL DGEMM ONCE TO INITIALIZE THREAD/BUFFER */
    dgemm_(&ta, &tb, &m, &n, &k, &alpha, A, &m, B, &k, &beta, C, &m);

    /* LOOP OVER DGEMM IN ORDER TO SMOOTHEN THE RESULTS */
    for (i=0; i<LOOP_COUNT; ++i) {
        gettimeofday(&start_time, NULL);
        dgemm_(&ta, &tb, &m, &n, &k, &alpha, A, &m, B, &k, &beta, C, &m);
        gettimeofday(&end_time,NULL);
        timersub(&end_time, &start_time, &duration[i]);
    }

    time_avg = 0.0;
    for (i=0; i<LOOP_COUNT; ++i) {
        elapsed[i] = (duration[i].tv_sec * 1.e3 + duration[i].tv_usec * 1.e-3) * 1.e-3;
        perf[i] = gflop / elapsed[i];
        time_avg += elapsed[i];
        printf("%s: Run %d \t\t\t\t:\t%.5f GFlops/sec\n", hostname, i, perf[i]);
    }
    printf("\n");
    printf("%s: Flops based on given dimensions\t:\t%.5f Gflops\n", hostname, gflop);
    printf("%s: Avg. performance \t:\t%.5f Gflop/s\n", hostname, gflop * LOOP_COUNT / time_avg);
    printf("%s: Avg. time / DGEMM operation\t:\t%f secs \n", hostname, time_avg / LOOP_COUNT);
    printf("%s: Time for %d DGEMM operations\t:\t%f secs \n", hostname, LOOP_COUNT, time_avg);
    printf("\n");
    rc = 0;

cleanup:
    /* BUG FIX: A/B/C (and the timing arrays) were previously leaked */
    free(A);
    free(B);
    free(C);
    free(duration);
    free(perf);
    free(elapsed);
    return rc;
}
kvstore_dist_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file mxnet_node.h
 * \brief implement mxnet nodes
 */
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_

#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
#include <utility>    // pair
#include <algorithm>  // sort
#include <cmath>

namespace mxnet {
namespace kvstore {

// control-plane commands sent from the frontend to the server
enum class CommandType {
  kController, kStopServer, kSyncMode, kSetGradientCompression
};

// data-plane request flavors, encoded in ps::KVMeta::cmd
enum class DataHandleType {
  kDefaultPushPull, kCompressedPushPull, kRowSparsePushPull
};

/**
 * \brief executor runs a function using the thread called \ref Start
 */
class Executor {
 public:
  /**
   * \brief start the executor; blocks the calling thread, draining and
   * running queued functions until a Block with an empty Func (enqueued
   * by \ref Stop) is dequeued
   */
  void Start() {
    std::unique_lock<std::mutex> lk(mu_);
    while (true) {
      cond_.wait(lk, [this]{return !queue_.empty();});
      Block blk = std::move(queue_.front());
      queue_.pop();
      lk.unlock();

      if (blk.f) {
        blk.f();
        blk.p->set_value();
      } else {
        // empty function is the poison pill: acknowledge and exit
        blk.p->set_value();
        break;
      }
      lk.lock();
    }
  }

  /**
   * \brief function
   */
  typedef std::function<void()> Func;

  /**
   * \brief let the thread called \ref Start to exec a function.
   * threadsafe; blocks the caller until the function has completed
   */
  void Exec(const Func& func) {
    Block blk(func);
    auto fut = blk.p->get_future();
    {
      std::lock_guard<std::mutex> lk(mu_);
      queue_.push(std::move(blk));
      cond_.notify_one();
    }
    fut.wait();
  }

  /**
   * \brief stop the thread, threadsafe
   */
  void Stop() {
    Exec(Func());
  }

 private:
  // a queued function paired with the promise used to signal completion
  struct Block {
    explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
    Func f;
    std::shared_ptr<std::promise<void>> p;
  };
  std::queue<Block> queue_;
  std::mutex mu_;
  std::condition_variable cond_;
};

class KVStoreDistServer {
 public:
  KVStoreDistServer() {
    using namespace std::placeholders;
    ps_server_ = new ps::KVServer<float>(0);
    // control messages (SimpleApp) go to CommandHandle, key/value
    // traffic to DataHandleEx
    static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
        std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
    ps_server_->set_request_handle(
        std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
    sync_mode_ = false;
    gradient_compression_ = std::make_shared<GradientCompression>();
    log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
  }

  ~KVStoreDistServer() {
    delete ps_server_;
  }

  void set_controller(const KVStore::Controller& controller) {
    CHECK(controller);
    controller_ = controller;
  }

  void set_updater(const KVStore::Updater& updater)  {
    CHECK(updater);
    updater_ = updater;
  }

  /**
   * \brief runs the executor loop; blocked until the \a kStopServer
   * command stops the executor (the original comment said \a kSyncMode,
   * but only CommandHandle's kStopServer branch calls exec_.Stop())
   */
  void Run() {
    exec_.Start();
  }

 private:
  // a partially aggregated push: the pending worker requests and the
  // running sum of their gradients
  struct MergeBuf {
    std::vector<ps::KVMeta> request;
    NDArray array;
  };

  // a value tagged with its distance to some mean
  // NOTE(review): the reference point ("TMean") is defined by code
  // outside this view — confirm before relying on it
  struct Dist2TMean {
  public:
    real_t value_;
    real_t dist_;
    Dist2TMean():value_(0.0),dist_(0.0) { }
    Dist2TMean(real_t value, real_t dist):value_(value),dist_(dist) { }
    ~Dist2TMean() { }
  };

  void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
    CommandType recved_type = static_cast<CommandType>(recved.head);
    if (recved_type == CommandType::kStopServer) {
      exec_.Stop();
    } else if (recved_type == CommandType::kSyncMode) {
      sync_mode_ = true;
    } else if (recved_type == CommandType::kSetGradientCompression) {
      gradient_compression_->DecodeParams(recved.body);
    } else {
      // this uses value 0 for message id from frontend
      // let the main thread to execute ctrl, which is necessary for python
      exec_.Exec([this, recved]() {
          CHECK(controller_);
          controller_(recved.head, recved.body);
        });
    }
    app->Response(recved);
  }

  // dispatch a data request to the handler matching its cmd field
  void DataHandleEx(const ps::KVMeta& req_meta,
                    const ps::KVPairs<real_t>& req_data,
                    ps::KVServer<real_t>* server) {
    DataHandleType recved_type = static_cast<DataHandleType>(req_meta.cmd);
    if (recved_type == DataHandleType::kRowSparsePushPull) {
      DataHandleRowSparse(req_meta, req_data, server);
    } else if (recved_type == DataHandleType::kCompressedPushPull) {
      DataHandleCompressed(req_meta, req_data, server);
    } else {
      DataHandleDefault(req_meta, req_data, server);
    }
    return;
  }

  // Once every worker has contributed to `merged`, apply the update to
  // `stored` (via updater_ on the executor thread, or a plain copy) and
  // respond to all pending requests.  Until then, just wait on the
  // partial aggregate.
  inline void ApplyUpdates(const int key, MergeBuf *merged,
                           NDArray *stored, ps::KVServer<real_t>* server) {
    if (merged->request.size() == (size_t) ps::NumWorkers()) {
      // let the main thread to execute updater_, which is necessary for python
      if (updater_) {
        exec_.Exec([this, key, merged, stored](){
            CHECK(updater_);
            updater_(key, merged->array, stored);
          });
      } else {
        // if no updater, just copy
        CopyFromTo(merged->array, stored);
      }
      if (log_verbose_)  {
        LOG(INFO) << "sync response to " << merged->request.size() << " workers";
      }
      for (const auto& req : merged->request) {
        server->Response(req);
      }
      merged->request.clear();
      stored->WaitToRead();
    } else {
      merged->array.WaitToRead();
    }
  }

  // Translate the encoded keys[1..num_rows] into row ids relative to
  // master_key.  NOTE(review): indices[0] = 0 is immediately overwritten
  // by the loop's first iteration — presumably dead code; confirm.
  void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                    const int64_t master_key, const int64_t num_rows) {
    indices[0] = 0;
    for (int64_t i = 1; i <= num_rows; i++) {
      int key = DecodeKey(keys[i]);
      auto row_id = key - master_key;
      indices[i - 1] = row_id;
    }
  }

  // Row-sparse push/pull: keys[0] carries the master key, the remaining
  // keys encode the row ids being pushed or pulled.
  // (Definition continues beyond this chunk.)
  void DataHandleRowSparse(const ps::KVMeta& req_meta,
                           const ps::KVPairs<real_t>& req_data,
                           ps::KVServer<real_t>* server) {
    int master_key = DecodeKey(req_data.keys[0]);
    auto num_rows = req_data.keys.size() -
1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); real_t* data = req_data.vals.data(); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context()); Engine::Get()->PushAsync( [recved, stored](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); using namespace mxnet::op; nnvm::dim_t nnr = rsp.shape()[0]; MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, { IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>(); mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx); }); mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(), recved.data().FlatTo1D<cpu, float>(), s); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); stored.WaitToRead(); server->Response(req_meta); return; } // synced push if (sync_mode_) { if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys; auto& merged = merge_buf_[master_key]; /* merged type: struct MergeBuf { std::vector<ps::KVMeta> request; NDArray array; }; */ if (merged.array.is_none()) { merged.array = NDArray(kRowSparseStorage, stored.shape(), Context()); } if (num_rows == 0) { // reset to zeros if (merged.request.size() == 0) { merged.array = NDArray(kRowSparseStorage, stored.shape(), Context()); } else { // nothing to aggregate } 
merged.request.push_back(req_meta); ApplyUpdates(master_key, &merged, &stored, server); return; } auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (merged.request.size() == 0) { CopyFromTo(recved, &merged.array, 0); } else { NDArray out(kRowSparseStorage, stored.shape(), Context()); std::vector<Engine::VarHandle> const_vars; const_vars.push_back(recved.var()); const_vars.push_back(merged.array.var()); // accumulate row_sparse gradients // TODO(haibin) override + operator for row_sparse NDArray // instead of calling BinaryComputeRspRsp directly using namespace mshadow; Engine::Get()->PushAsync( [recved, merged, out](RunContext ctx, Engine::CallbackOnComplete on_complete) { op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {recved, merged.array}, {kWriteTo}, {out}); on_complete(); }, recved.ctx(), const_vars, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &merged.array, 0); } merged.request.push_back(req_meta); ApplyUpdates(master_key, &merged, &stored, server); } else { // async push if (log_verbose_) LOG(INFO) << "async push: " << master_key; if (num_rows == 0) { server->Response(req_meta); return; } auto unit_len = req_data.lens[1]; CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; TShape dshape(ds, ds + 2); TBlob recv_blob(data, dshape, cpu::kDevMask); // 
NOLINT(*) NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); exec_.Exec([this, master_key, &recved, &stored](){ CHECK(updater_); updater_(master_key, recved, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<real_t> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const float* data = stored.data().dptr<float>(); auto len = unit_len * num_rows; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_len; auto begin = (i - 1) * unit_len; auto end = i * unit_len; response.vals.segment(begin, end).CopyFrom(src, unit_len); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } } void DefaultStorageResponse(int key, const NDArray& stored, const ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { ps::KVPairs<real_t> response; CHECK(!stored.is_none()) << "init " << key << " first"; auto len = stored.shape().Size(); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { if (req_meta.push) { // there used several WaitToRead, this 
is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1]}; TShape dshape(ds, ds + 1); TBlob recv_blob((real_t*) req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = TShape{(int64_t) original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = merge_buf_[key]; if (merged.array.is_none()) { merged.array = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.array, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.array += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(key, &merged, &stored, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(key, store_[key], req_meta, req_data, server); } } /** struct KVPairs { // /** \brief empty constructor // KVPairs() {} /** \brief the list of keys SArray<Key> 
keys; /** \brief the according values SArray<Val> vals; /** \brief the according value lengths (could be empty) SArray<int> lens; }; */ /* /** \brief meta information about a kv request struct KVMeta { /** \brief the int cmd int cmd; /** \brief whether or not this is a push request bool push; /** \brief sender's node id int sender; /** \brief the associated timestamp int timestamp; /** \brief the customer id of worker int customer_id; }; */ typedef std::pair<int, double> PAIR; void getSortedScoreVector (const std::vector<ps::KVPairs<real_t>> &alldata_v, std::vector<PAIR> &idx_score_vec) { int nd_size = alldata_v[0].lens[0]; for (int i = 0; i < alldata_v.size(); i++) { real_t* a1 = (real_t*)alldata_v[i].vals.data(); real_t score = 0; for (int j = 0; j < alldata_v.size(); j++) { if (i == j) continue; real_t* a2 = (real_t*)alldata_v[j].vals.data(); // calculate distance NDArray for (int n = 0; n < nd_size; n++) { score += (a1[n] - a2[n])*(a1[n] - a2[n]); } } // store <index, score> pair into vector<int> idx_score_vec.push_back(std::make_pair(i, score)); } // sort vector std::sort(idx_score_vec.begin(), idx_score_vec.end(), [](const PAIR &x, const PAIR &y) -> int { return x.second < y.second; }); } void Krum(const std::vector<ps::KVPairs<real_t>> &alldata_v, real_t* res_sum, int byzt_num) { // calculate score and create pair CHECK_GT(ps::NumWorkers()-byzt_num-2, 0) << "number of byzantine node is too big!"; std::vector<PAIR> idx_score_vec(0); getSortedScoreVector(alldata_v, idx_score_vec); int nd_size = alldata_v[0].lens[0]; // construct recved for (int i = 0; i < ps::NumWorkers() - 2 - byzt_num; i++) { //ps::NumWorkers()-2-byt_num real_t* ad = (real_t*)alldata_v[idx_score_vec[i].first].vals.data(); for (int j = 0; j < nd_size; j++) { // sz == req_data.vals.size() res_sum[j] += ad[j]; } } // scale the array for (int j = 0; j < nd_size; j++) { // sz == req_data.vals.size() res_sum[j] *= ps::NumWorkers(); res_sum[j] /= ps::NumWorkers() - 2 - byzt_num; } } void 
TrimmedMean(const std::vector<ps::KVPairs<real_t>> &alldata_v, real_t* res_sum, int byzt_num) { CHECK_GT(ps::NumWorkers() - 2 * byzt_num, 0) << "number of byzantine node is too big!"; int nd_size = alldata_v[0].lens[0]; int count = ps::NumWorkers()- 2 * byzt_num; for (int dim = 0; dim < nd_size; dim++) { std::vector<double> one_dim_vec(0); for (int i = 0; i < ps::NumWorkers(); i++) { real_t* data = (real_t*)alldata_v[i].vals.data(); one_dim_vec.push_back(data[dim]); } std::sort(one_dim_vec.begin(), one_dim_vec.end()); // sum up b-trimmed for (int k = byzt_num; k < ps::NumWorkers() - byzt_num; k++) { res_sum[dim] += one_dim_vec[k]; } // calculate mean res_sum[dim] /= count; } } bool CompareByDist (const Dist2TMean &i, const Dist2TMean &j) { return (i.dist_ < j.dist_); } void CongAlgo(const std::vector<ps::KVPairs<real_t>> &alldata_v, real_t* res_sum, int byzt_num) { CHECK_GT(ps::NumWorkers() - 2 * byzt_num, 0) << "number of byzantine node is too big!"; int nd_size = alldata_v[0].lens[0]; int trimmedcount = ps::NumWorkers()- 2 * byzt_num; int count = ps::NumWorkers() - byzt_num; for (int dim = 0; dim < nd_size; dim++) { std::vector<double> one_dim_vec(0); for (int i = 0; i < ps::NumWorkers(); i++) { real_t* data = (real_t*)alldata_v[i].vals.data(); one_dim_vec.push_back(data[dim]); } std::sort(one_dim_vec.begin(), one_dim_vec.end()); // calculate b-trimmed mean real_t btmean = 0; for (int k = byzt_num; k < ps::NumWorkers() - byzt_num; k++) { btmean += one_dim_vec[k]; } btmean /= trimmedcount; // get n-q nearest neighbors std::vector<Dist2TMean> dist_vec(0); for (auto one_dim_data : one_dim_vec) { Dist2TMean p(one_dim_data, abs(one_dim_data - btmean)); dist_vec.push_back(p); } std::sort(dist_vec.begin(), dist_vec.end(), [](const Dist2TMean &x, const Dist2TMean &y) { return x.dist_ < y.dist_; }); for (int i = 0; i < ps::NumWorkers() - byzt_num; i++) { res_sum[dim] += dist_vec[i].value_; } res_sum[dim] /= count; } } // namespace mxnet void DataHandleDefault(const 
ps::KVMeta& req_meta, const ps::KVPairs<real_t> &req_data, ps::KVServer<real_t>* server) { CHECK_EQ(req_meta.cmd, static_cast<int>(DataHandleType::kDefaultPushPull)); // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { if (stored.is_none()) { size_t ds[] = {(size_t)req_data.lens[0]}; TShape dshape(ds, ds + 1); TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); // received data needed to pushed to stored // initialization stored = NDArray(dshape, Context()); CopyFromTo(recved, &stored, 0); server->Response(req_meta); // ? stored.WaitToRead(); } else if (sync_mode_) { /* ------ baseline------- // synced push -- use merfed_buf_:It represents values from different workers being merged. 
size_t ds[] = {(size_t)req_data.lens[0]}; TShape dshape(ds, ds + 1); auto& merged = merge_buf_[key]; if (merged.array.is_none()) { merged.array = NDArray(dshape, Context()); // Context()-cpu/gpu } uint byzt_num = 3; if (merged.request.size() < byzt_num) { real_t* b1 = (real_t*)req_data.vals.data(); for (uint n = 0; n < req_data.vals.size(); n++) { b1[n] *= -100; } } TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); // received data needed to pushed to stored if (merged.request.size() == 0) { CopyFromTo(recved, &merged.array, 0); } else { merged.array += recved; } merged.request.push_back(req_meta); ApplyUpdates(key, &merged, &stored, server); --------- baseline---------- */ auto& merged = merge_buf_[key]; merged.request.push_back(req_meta); auto& alldata_v = all_push_buf_[key]; alldata_v.push_back(req_data); // NDArray one_array = NDArray(dshape, Context()); // CopyFromTo(recved, &one_array, 0); // push_vector.push_back(one_array); // if (push_vector.size() < (size_t) ps::NumWorkers()){ // one_array.WaitToRead(); // } // initialize merged.array if (alldata_v.size() == (size_t) ps::NumWorkers()){ if (merged.array.is_none()) { size_t ds[] = {(size_t)alldata_v[0].lens[0]}; TShape dshape(ds, ds + 1); merged.array = NDArray(dshape, Context()); // Context()-cpu/gpu } // calculate similarity score for each data using every pair real_t* res_sum; res_sum = (real_t*)calloc(alldata_v[0].vals.size(), sizeof(real_t)); // size-bzt_num-2 // artificial byzantine by multiplying the first array by 5 real_t* a1 = (real_t*)alldata_v[0].vals.data(); // real_t* a3 = (real_t*)alldata_v[2].vals.data(); // real_t* a5 = (real_t*)alldata_v[4].vals.data(); for (int n = 0; n < alldata_v[0].vals.size(); n++) { a1[n] *= -100; // a3[n] *= -90; // a5[n] *= -110; } int byzt_num = 1; // ------ KRUM --------- // Krum(alldata_v, res_sum, byzt_num); // ------ TrimmedMean --------- TrimmedMean(alldata_v, res_sum, byzt_num); // 
------ CongAlgo ----- // CongAlgo(alldata_v, res_sum, byzt_num); // ------- test failure case with no Krum ------- // int nd_size = alldata_v[0].lens[0]; // for (int i = 0; i < ps::NumWorkers(); i++) { //ps::NumWorkers()-2-byt_num // real_t* ad = (real_t*)alldata_v[i].vals.data(); // for (int j = 0; j < nd_size; j++) { // sz == req_data.vals.size() // res_sum[j] += ad[j]; // } // } size_t ds[] = {(size_t)alldata_v[0].lens[0]}; TShape dshape(ds, ds + 1); TBlob recv_blob(res_sum, dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); // received data needed to pushed to stored CopyFromTo(recved, &merged.array, 0); ApplyUpdates(key, &merged, &stored, server); alldata_v.clear(); } } else { // async push size_t ds[] = {(size_t)req_data.lens[0]}; TShape dshape(ds, ds + 1); TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*) dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); // received data needed to pushed to stored exec_.Exec([this, key, &recved, &stored](){ CHECK(updater_); updater_(key, recved, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull DefaultStorageResponse(key, stored, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } // belows are also private variables /** * \brief user defined mode for push */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ std::unordered_map<int, NDArray> store_; /** * \brief merge_buf_ is a buffer used if sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. */ std::unordered_map<int, MergeBuf> merge_buf_; /** * \brief all_push_buf_ is a buffer used if sync_mode is true. It represents * all pushed data from all workers in a single iteration. 
The store will be * updated to an aggregated value when values from all workers are pushed * into this buffer. */ std::unordered_map<int, std::vector<ps::KVPairs<real_t>>> all_push_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<float>* ps_server_; // whether to LOG verbose information bool log_verbose_; /** * \brief gradient compression object. * starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
parfor.h
// Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PARFOR_H_ #define PARFOR_H_ #include <omp.h> #include <cstdint> #include <utility> #include <vector> namespace qsim { /** * Helper struct for executing for-loops in parallel across multiple threads. */ template <uint64_t MIN_SIZE> struct ParallelForT { explicit ParallelForT(unsigned num_threads) : num_threads(num_threads) {} // GetIndex0 and GetIndex1 are useful when we need to know how work was // divided between threads, for instance, for reusing partial sums obtained // by RunReduceP. uint64_t GetIndex0(uint64_t size, unsigned thread_id) const { return size >= MIN_SIZE ? size * thread_id / num_threads : 0; } uint64_t GetIndex1(uint64_t size, unsigned thread_id) const { return size >= MIN_SIZE ? size * (thread_id + 1) / num_threads : size; } template <typename Function, typename... Args> void Run(uint64_t size, Function&& func, Args&&... args) const { if (num_threads > 1 && size >= MIN_SIZE) { #pragma omp parallel num_threads(num_threads) { unsigned n = omp_get_num_threads(); unsigned m = omp_get_thread_num(); uint64_t i0 = GetIndex0(size, m); uint64_t i1 = GetIndex1(size, m); for (uint64_t i = i0; i < i1; ++i) { func(n, m, i, args...); } } } else { for (uint64_t i = 0; i < size; ++i) { func(1, 0, i, args...); } } } template <typename Function, typename Op, typename... 
Args> std::vector<typename Op::result_type> RunReduceP( uint64_t size, Function&& func, Op&& op, Args&&... args) const { std::vector<typename Op::result_type> partial_results; if (num_threads > 1 && size >= MIN_SIZE) { partial_results.resize(num_threads, 0); #pragma omp parallel num_threads(num_threads) { unsigned n = omp_get_num_threads(); unsigned m = omp_get_thread_num(); uint64_t i0 = GetIndex0(size, m); uint64_t i1 = GetIndex1(size, m); typename Op::result_type partial_result = 0; for (uint64_t i = i0; i < i1; ++i) { partial_result = op(partial_result, func(n, m, i, args...)); } partial_results[m] = partial_result; } } else if (num_threads > 0) { typename Op::result_type result = 0; for (uint64_t i = 0; i < size; ++i) { result = op(result, func(1, 0, i, args...)); } partial_results.resize(1, result); } return partial_results; } template <typename Function, typename Op, typename... Args> typename Op::result_type RunReduce(uint64_t size, Function&& func, Op&& op, Args&&... args) const { auto partial_results = RunReduceP(size, func, std::move(op), args...); typename Op::result_type result = 0; for (auto partial_result : partial_results) { result = op(result, partial_result); } return result; } unsigned num_threads; }; using ParallelFor = ParallelForT<1024>; } // namespace qsim #endif // PARFOR_H_
functor.h
/*!
 *  Copyright (c) 2019 by Contributors
 * \file kernel/cpu/functor.h
 * \brief Functors for template on CPU
 *
 * CPU specializations of the reducer functor templates declared in
 * ../binary_reduce_common.h. Each reducer provides:
 *   - Call(addr, val): fold val into the accumulator at *addr, safe to
 *     invoke concurrently from multiple OpenMP threads on the same addr.
 *   - BackwardCall(val, accum): the (sub)gradient of the reduction w.r.t.
 *     the contributed value, given the final accumulated result.
 */
#ifndef DGL_KERNEL_CPU_FUNCTOR_H_
#define DGL_KERNEL_CPU_FUNCTOR_H_

#include <dmlc/omp.h>
#include <algorithm>

#include "../binary_reduce_common.h"

namespace dgl {
namespace kernel {

// Reducer functor specialization
// Sum reduction: thread-safe accumulation via an atomic add.
template <typename DType>
struct ReduceSum<kDLCPU, DType> {
  static void Call(DType* addr, DType val) {
#pragma omp atomic
    *addr += val;
  }
  // d(sum)/d(val) is 1 regardless of the accumulated value.
  static DType BackwardCall(DType val, DType accum) {
    return 1;
  }
};

// Max reduction. NOTE(review): the unnamed `omp critical` here and in
// ReduceMin share one global lock with every other unnamed critical region
// in the program; naming them (e.g. `critical(dgl_reduce_max)`) would avoid
// unrelated serialization — left unchanged to preserve behavior.
template <typename DType>
struct ReduceMax<kDLCPU, DType> {
  static void Call(DType* addr, DType val) {
#pragma omp critical
    *addr = std::max(*addr, val);
  }
  // Gradient flows only to value(s) equal to the final max.
  static DType BackwardCall(DType val, DType accum) {
    return static_cast<DType>(val == accum);
  }
};

// Min reduction; see the locking note on ReduceMax above.
template <typename DType>
struct ReduceMin<kDLCPU, DType> {
  static void Call(DType* addr, DType val) {
#pragma omp critical
    *addr = std::min(*addr, val);
  }
  // Gradient flows only to value(s) equal to the final min.
  static DType BackwardCall(DType val, DType accum) {
    return static_cast<DType>(val == accum);
  }
};

// Product reduction: thread-safe accumulation via an atomic multiply.
template <typename DType>
struct ReduceProd<kDLCPU, DType> {
  static void Call(DType* addr, DType val) {
#pragma omp atomic
    *addr *= val;
  }
  // d(prod)/d(val) = accum / val; NOTE(review): divides by zero if val == 0.
  static DType BackwardCall(DType val, DType accum) {
    return accum / val;
  }
};

// "None" reduction: plain overwrite. Not atomic — presumably each addr is
// written by exactly one producer here; verify against the callers.
template <typename DType>
struct ReduceNone<kDLCPU, DType> {
  static void Call(DType* addr, DType val) {
    *addr = val;
  }
  static DType BackwardCall(DType val, DType accum) {
    return 1;
  }
};

}  // namespace kernel
}  // namespace dgl

#endif  // DGL_KERNEL_CPU_FUNCTOR_H_
bisectingKmeans.c
/* Kalign - a multiple sequence alignment program

   Copyright 2006, 2019 Timo Lassmann

   This file is part of kalign.

   Kalign is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <https://www.gnu.org/licenses/>.
*/

/* Guide-tree construction: recursive bisecting k-means over anchor-based
   sequence distances, falling back to UPGMA for small partitions. */

#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include <xmmintrin.h>
#include "tlrng.h"
#include "msa.h"
#include "bisectingKmeans.h"
#include "global.h"
#include "msa.h"
#include "aln_task.h"
#include "sequence_distance.h"
#include "euclidean_dist.h"
/* #include "alignment.h" */
#include "pick_anchor.h"
#include "esl_stopwatch.h"

/* One node of the binary guide tree. */
struct node{
    struct node* left;
    struct node* right;
    int id;     /* sequence index for leaves; internal label otherwise (see label_internal) */
    int d;      /* depth/priority: max child depth + 1 for internal nodes */
};

/* Result of one k-means bisection attempt: the two index partitions. */
struct kmeans_result{
    int* sl;    /* sample indices assigned to the left cluster */
    int* sr;    /* sample indices assigned to the right cluster */
    int nl;     /* number of entries in sl */
    int nr;     /* number of entries in sr */
    float score;/* sum of min distances to the two centroids (lower is better) */
};

static struct kmeans_result* alloc_kmeans_result(int num_samples);
static void free_kmeans_results(struct kmeans_result* k);

struct node* upgma(float **dm,int* samples, int numseq);
struct node* alloc_node(void);
int label_internal(struct node*n, int label);
//int label_internal(struct node*n, int label);
int* readbitree(struct node* p,int* tree);
/* void print_tree(struct node*n, struct aln_tasks *t) ; */
static void create_tasks(struct node*n, struct aln_tasks* t);
static int sort_tasks_by_priority(const void *a, const void *b);
/* void printTree(struct node* curr,int depth); */
//struct node* bisecting_kmeans(struct msa* msa, struct node* n, float** dm,int* samples,int numseq, int num_anchors,int num_samples,struct rng_state* rng);
static struct node* bisecting_kmeans(struct msa* msa, struct node* n, float** dm,int* samples,int numseq, int num_anchors,int num_samples,struct rng_state* rng, int d);

/* Build the guide tree for msa and fill *tasks with one alignment task per
   internal node, sorted by priority. Returns OK/FAIL. */
int build_tree_kmeans(struct msa* msa, struct aln_param* ap,struct aln_tasks** tasks)
{
    //struct drand48_data randBuffer;
    struct aln_tasks* t = NULL;
    struct node* root = NULL;

    float** dm = NULL;          /* numseq x num_anchors distance matrix */
    int* samples = NULL;        /* initially the identity permutation 0..numseq-1 */
    int* anchors = NULL;
    int num_anchors;
    int numseq;
    int i;

    ASSERT(msa != NULL, "No alignment.");
    //ASSERT(param != NULL, "No input parameters.");
    ASSERT(ap != NULL, "No alignment parameters.");

    t = *tasks;

    numseq = msa->numseq;

    DECLARE_TIMER(timer);

    /* pick anchors . */
    // LOG_MSG("Calculating pairwise distances");
    START_TIMER(timer);
    // LOG_MSG("pick_anchor");
    RUNP(anchors = pick_anchor(msa, &num_anchors));
    // LOG_MSG("d_estimation");
    // fprintf(stdout,"anchors:%d,num_anchors:%d\n",*anchors,num_anchors);
    RUNP(dm = d_estimation(msa, anchors, num_anchors,0));//les,int pair)
    // LOG_MSG("done");
    STOP_TIMER(timer);
    GET_TIMING(timer);
    //LOG_MSG("Done in %f sec.", GET_TIMING(timer));
    MFREE(anchors);

    MMALLOC(samples, sizeof(int)* numseq);
    for(i = 0; i < numseq;i++){
        samples[i] = i;
    }

    //RUNP(root = alloc_node());
    START_TIMER(timer);
    // LOG_MSG("Building guide tree.");
#ifdef HAVE_OPENMP
    /* omp_set_num_threads(4); */
#pragma omp parallel
    // Only the first thread will spawn other threads
#pragma omp single nowait
    {
#endif
        /* bisecting_kmeans takes ownership of `samples` (it is freed inside). */
        root = bisecting_kmeans(msa,root, dm, samples, numseq, num_anchors, numseq, ap->rng,0);
#ifdef HAVE_OPENMP
    }
#endif
    STOP_TIMER(timer);
    GET_TIMING(timer);
    //LOG_MSG("Done in %f sec.", GET_TIMING(timer));
    label_internal(root, numseq);

    /* create_tasks also frees the children of each internal node as it walks,
       so only the root remains to be freed below. */
    create_tasks(root, t);

    qsort(t->list, t->n_tasks, sizeof(struct task*), sort_tasks_by_priority);
    /* for(i = 0; i < t->n_tasks;i++){ */
    /*         fprintf(stdout,"%3d %3d -> %3d (p: %d)\n", t->list[i]->a, t->list[i]->b, t->list[i]->c, t->list[i]->p); */
    /* } */
    //*task_list = t;
    /*exit(0);
      ap->tree[0] = 1;
      ap->tree = readbitree(root, ap->tree);
      for (i = 0; i < (numseq*3);i++){
      tree[i] = tree[i+1];
      }*/
    MFREE(root);

    /* rows of dm were allocated with _mm_malloc (aligned) — free accordingly */
    for(i =0 ; i < msa->numseq;i++){
        _mm_free(dm[i]);
    }
    MFREE(dm);

    DESTROY_TIMER(timer);
    return OK;
ERROR:
    return FAIL;
}

/* Recursively split `samples` (owned and freed here) into two clusters with
   repeated 2-means attempts, keeping the best-scoring split; partitions of
   fewer than 100 sequences are finished with UPGMA. Returns the subtree.
   NOTE(review): parameter `d` is threaded through the recursion unchanged
   and never read — presumably a leftover depth counter; confirm upstream. */
struct node* bisecting_kmeans(struct msa* msa, struct node* n, float** dm,int* samples,int numseq, int num_anchors,int num_samples,struct rng_state* rng, int d)
{
    struct kmeans_result* res_tmp = NULL;
    struct kmeans_result* best = NULL;
    struct kmeans_result* res_ptr = NULL;
    int tries = 50;             /* number of k-means restarts (capped at num_samples) */
    int t_iter;
    int r;
    int* sl = NULL;
    int* sr = NULL;
    int num_l,num_r;
    float* w = NULL;            /* scratch: mean vector / current accumulation target */
    float* wl = NULL;           /* new left centroid being accumulated */
    float* wr = NULL;           /* new right centroid being accumulated */
    float* cl = NULL;           /* current left centroid */
    float* cr = NULL;           /* current right centroid */
    float dl = 0.0F;
    float dr = 0.0F;
    float score;
    int i,j,s;
    int num_var;
    int stop = 0;
    if(num_samples < 100){
        /* Small partition: build the subtree with UPGMA on exact pairwise
           distances. This local dm intentionally shadows the parameter. */
        float** dm = NULL;
        RUNP(dm = d_estimation(msa, samples, num_samples,1));// anchors, num_anchors,1));

        n = upgma(dm,samples, num_samples);
        gfree(dm);
        MFREE(samples);
        return n;
    }

    /* round num_anchors up to a multiple of 8 for aligned SIMD buffers */
    num_var = num_anchors / 8;
    if( num_anchors%8){
        num_var++;
    }
    num_var = num_var << 3;

    wr = _mm_malloc(sizeof(float) * num_var,32);
    wl = _mm_malloc(sizeof(float) * num_var,32);
    cr = _mm_malloc(sizeof(float) * num_var,32);
    cl = _mm_malloc(sizeof(float) * num_var,32);

    RUNP(best = alloc_kmeans_result(num_samples));
    RUNP(res_tmp = alloc_kmeans_result(num_samples));

    best->score = FLT_MAX;

    tries = MACRO_MIN(tries, num_samples);

    for(t_iter = 0;t_iter < tries;t_iter++){
        res_tmp->score = FLT_MAX;
        sl = res_tmp->sl;
        sr = res_tmp->sr;

        w = _mm_malloc(sizeof(float) * num_var,32);

        for(i = 0; i < num_var;i++){
            w[i] = 0.0F;
            wr[i] = 0.0F;
            wl[i] = 0.0F;
            cr[i] = 0.0F;
            cl[i] = 0.0F;
        }

        /* w = mean of all sample vectors */
        for(i = 0; i < num_samples;i++){
            s = samples[i];
            for(j = 0; j < num_anchors;j++){
                w[j] += dm[s][j];
            }
        }
        for(j = 0; j < num_anchors;j++){
            w[j] /= (float)num_samples;
        }

        /* seed: cl = vector of sample t_iter; cr = its mirror about the mean */
        //r = tl_random_int(rng , num_samples);
        //r = sel[t_iter];
        r = t_iter;
        s = samples[r];
        //LOG_MSG("Selected %d\n",s);
        for(j = 0; j < num_anchors;j++){
            cl[j] = dm[s][j];
        }
        for(j = 0; j < num_anchors;j++){
            cr[j] = w[j] - (cl[j] - w[j]);
            // fprintf(stdout,"%f %f %f\n", cl[j],cr[j],w[j]);
        }
        _mm_free(w);

        /* check if cr == cl - we have identical sequences  */
        s = 0;
        for(j = 0; j < num_anchors;j++){
            if(fabsf(cl[j]-cr[j]) > 1.0E-6){
                s = 1;
                break;
            }
        }
        if(!s){
            /* degenerate split: first sample left, everything else right */
            score = 0.0F;
            num_l = 0;
            num_r = 0;
            sl[num_l] = samples[0];
            num_l++;
            for(i =1 ; i <num_samples;i++){
                sr[num_r] = samples[i];
                num_r++;
            }
        }else{
            w = NULL;
            /* Lloyd iterations: assign to nearest centroid, recompute
               centroids, swap in via pointer exchange until converged. */
            while(1){
                stop++;
                if(stop == 10000){
                    ERROR_MSG("Failed.");
                }
                num_l = 0;
                num_r = 0;
                for(i = 0; i < num_anchors;i++){
                    wr[i] = 0.0F;
                    wl[i] = 0.0F;
                }
                score = 0.0f;
                for(i = 0; i < num_samples;i++){
                    s = samples[i];
#ifdef HAVE_AVX2
                    edist_256(dm[s], cl, num_anchors, &dl);
                    edist_256(dm[s], cr, num_anchors, &dr);
#else
                    edist_serial(dm[s], cl, num_anchors, &dl);
                    edist_serial(dm[s], cr, num_anchors, &dr);
#endif
                    score += MACRO_MIN(dl,dr);
                    if(dr < dl){
                        w = wr;
                        sr[num_r] = s;
                        num_r++;
                    }else if (dr > dl){
                        w = wl;
                        sl[num_l] = s;
                        num_l++;
                    }else{
                        /* tie: alternate sides by index parity to avoid
                           collapsing everything into one cluster */
                        if(i & 1){
                            w = wr;
                            sr[num_r] = s;
                            num_r++;
                        }else{
                            w = wl;
                            sl[num_l] = s;
                            num_l++;
                        }
                    }
                    for(j = 0; j < num_anchors;j++){
                        w[j] += dm[s][j];
                    }
                }
                for(j = 0; j < num_anchors;j++){
                    wl[j] /= (float)num_l;
                    wr[j] /= (float)num_r;
                }
                /* converged when the new centroids equal the old ones */
                s = 0;
                for(j = 0; j < num_anchors;j++){
                    if(wl[j] != cl[j]){
                        s = 1;
                        break;
                    }
                    if(wr[j] != cr[j]){
                        s = 1;
                        break;
                    }
                }
                if(s){
                    w = cl;
                    cl = wl;
                    wl = w;
                    w = cr;
                    cr = wr;
                    wr = w;
                }else{
                    break;
                }
            }
        }
        res_tmp->nl = num_l;
        res_tmp->nr = num_r;
        res_tmp->score = score;
        /* keep the best attempt by swapping the result buffers */
        if(res_tmp->score < best->score){
            //LOG_MSG("Better!!! %f %f", res_tmp->score,best->score);
            res_ptr = res_tmp;
            res_tmp = best;
            best = res_ptr;
        }
    }
    free_kmeans_results(res_tmp);
    sl = best->sl;
    sr = best->sr;
    num_l = best->nl;
    num_r = best->nr;
    /* only the container is freed here; sl/sr ownership passes to the
       recursive calls below, which free them as their `samples` arrays */
    MFREE(best);

    _mm_free(wr);
    _mm_free(wl);
    _mm_free(cr);
    _mm_free(cl);

    MFREE(samples);

    n = alloc_node();
    //LOG_MSG("Done");
#ifdef HAVE_OPENMP
#pragma omp task shared(n) if(numseq > 2000)
#endif
    n->left = bisecting_kmeans(msa,n->left, dm, sl, numseq, num_anchors, num_l,rng,d);
#ifdef HAVE_OPENMP
#pragma omp task shared(n) if(numseq > 2000)
#endif
    n->right = bisecting_kmeans(msa,n->right, dm, sr, numseq, num_anchors, num_r,rng,d);
#ifdef HAVE_OPENMP
#pragma omp taskwait
#endif
    return n;
ERROR:
    return NULL;
}

/* Classic UPGMA clustering over the (mutated in place) distance matrix dm;
   `samples` maps local indices to sequence ids for the leaves. Returns the
   root of the resulting subtree, or NULL on allocation failure. */
struct node* upgma(float **dm,int* samples, int numseq)
{
    struct node** tree = NULL;
    struct node* tmp = NULL;

    int i,j;
    int *as = NULL;             /* active-set markers: 0 = merged away */
    float max;
    int node_a = 0;
    int node_b = 0;
    int cnode = numseq;
    int numprofiles;

    numprofiles = (numseq << 1) - 1;

    MMALLOC(as,sizeof(int)*numseq);
    for (i = numseq; i--;){
        as[i] = i+1;
    }

    MMALLOC(tree,sizeof(struct node*)*numseq);
    for (i = 0;i < numseq;i++){
        tree[i] = NULL;
        tree[i] = alloc_node();
        tree[i]->id = samples[i];
    }

    while (cnode != numprofiles){
        /* find the closest active pair (max starts at FLT_MAX and tracks
           the minimum distance seen so far) */
        max = FLT_MAX;
        for (i = 0;i < numseq-1; i++){
            if (as[i]){
                for ( j = i + 1;j < numseq;j++){
                    if (as[j]){
                        if (dm[i][j] < max){
                            max = dm[i][j];
                            node_a = i;
                            node_b = j;
                        }
                    }
                }
            }
        }
        tmp = NULL;
        tmp = alloc_node();
        tmp->left = tree[node_a];
        tmp->right = tree[node_b];
        tree[node_a] = tmp;
        tree[node_b] = NULL;

        /*deactivate sequences to be joined*/
        as[node_a] = cnode+1;
        as[node_b] = 0;
        cnode++;

        /*calculate new distances*/
        for (j = numseq;j--;){
            if (j != node_b){
                dm[node_a][j] = (dm[node_a][j] + dm[node_b][j])*0.5F  + 0.001F;
            }
            //fprintf(stdout,"\n");
        }
        dm[node_a][node_a] = 0.0F;
        for (j = numseq;j--;){
            dm[j][node_a] = dm[node_a][j];
        }
    }
    tmp = tree[node_a];

    MFREE(tree);
    MFREE(as);
    return tmp;
ERROR:
    return NULL;
}

/* Allocate a tree node with empty children and id -1 (unlabelled). */
struct node* alloc_node(void)
{
    struct node* n = NULL;
    MMALLOC(n, sizeof(struct node));
    n->left = NULL;
    n->right = NULL;
    n->id = -1;
    n->d = 0;
    return n;
ERROR:
    return NULL;
}

/* Post-order pass: assign consecutive ids (starting at `label`) to internal
   nodes and set each internal node's depth d = max(child depths) + 1.
   Returns the next unused label. */
int label_internal(struct node*n, int label)
{
    //n->d = d;
    if(n->left){
        label = label_internal(n->left, label);
    }
    if(n->right){
        label = label_internal(n->right, label);
    }
    if(n->left && n->right){
        n->d = MACRO_MAX(n->left->d,n->right->d) + 1;
    }
    if(n->id == -1){
        n->id = label;
        label++;
    }
    return label;
}

/* Emit one task (a = left id, b = right id, c = own id, p = depth) per
   internal node into t, then free the node's children after recursing.
   NOTE(review): the leading for-loop has an empty body — leftover from the
   commented-out indentation printing; it is dead code. */
void create_tasks(struct node*n, struct aln_tasks* t)
{
    int i;
    for(i = 0; i < n->d;i++){
        //fprintf(stdout," ");
    }
    if(n->left && n->right){
        struct task*task;
        task = t->list[t->n_tasks];
        task->a = n->left->id;
        task->b = n->right->id;
        task->c = n->id;
        task->p = n->d;
        //fprintf(stdout,"Node %d %d depends on %d %d \n", n->id, n->d , n->left->id, n->right->id);
        t->n_tasks++;
    }

    if(n->left){
        create_tasks(n->left,t);
    }
    if(n->right){
        create_tasks(n->right,t);
    }

    if(n->left){
        if(n->right){
            MFREE(n->left);
            MFREE(n->right);
        }
    }
}

/* Serialize the tree into the flat triplet array `tree` (tree[0] is the
   write cursor), freeing children as it goes. Legacy format; currently only
   referenced from commented-out code in build_tree_kmeans. */
int* readbitree(struct node* p,int* tree)
{
    if(p->left){
        tree = readbitree(p->left,tree);
    }
    if(p->right){
        tree = readbitree(p->right,tree);
    }

    if(p->left){
        if(p->right){
            tree[tree[0]] = p->left->id;
            tree[tree[0]+1] = p->right->id;
            tree[tree[0]+2] = p->id;
            tree[0] +=3;
            MFREE(p->left);
            MFREE(p->right);
        }
    }
    return tree;
}

/* Allocate a kmeans_result with room for num_samples indices on each side. */
struct kmeans_result* alloc_kmeans_result(int num_samples)
{
    struct kmeans_result* k = NULL;

    ASSERT(num_samples != 0, "No samples???");

    MMALLOC(k, sizeof(struct kmeans_result));
    k->nl = 0;
    k->nr = 0;
    k->sl = NULL;
    k->sr = NULL;
    k->score = FLT_MAX;
    MMALLOC(k->sl, sizeof(int) * num_samples);
    MMALLOC(k->sr, sizeof(int) * num_samples);
    k->score = FLT_MAX;
    return k;
ERROR:
    free_kmeans_results(k);
    return NULL;
}

/* Free a kmeans_result and its index arrays; NULL-safe. */
void free_kmeans_results(struct kmeans_result* k)
{
    if(k){
        if(k->sl){
            MFREE(k->sl);
        }
        if(k->sr){
            MFREE(k->sr);
        }
        MFREE(k);
    }
}

/* qsort comparator: ascending by task priority p.
   NOTE(review): never returns 0 and returns 1 for equal priorities from both
   orderings, so it is not a strict weak ordering — equal-priority order is
   implementation-defined; confirm this is acceptable. */
int sort_tasks_by_priority(const void *a, const void *b)
{
    struct task* const *one = a;
    struct task* const *two = b;

    if((*one)->p >= (*two)->p){
        return 1;
    }else{
        return -1;
    }
}
residualbased_newton_raphson_contact_strategy.h
// KRATOS    ______            __             __  _____ __                  __                   __
//          / ____/___  ____  / /_____ ______/ /_/ ___// /________  _______/ /___  ___________ _/ /
//         / /   / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
//        / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / /  / /_/ / /__/ /_/ /_/ / /  / /_/ / /
//        \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/   \__,_/\___/\__/\__,_/_/   \__,_/_/  MECHANICS
//
//  License:         BSD License
//                   license: ContactStructuralMechanicsApplication/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//

#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY

/* System Includes */

/* External Includes */

/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"

// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"

// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "custom_python/process_factory_utility.h"
#include "custom_utilities/contact_utilities.h"

namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{

/**
 * @class ResidualBasedNewtonRaphsonContactStrategy
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Contact Newton Raphson class
 * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems:
 *          a contact-aware Predict(), an optional outer ("inner loop") iteration around the Newton loop for the simplified
 *          semi-smooth scheme, and an optional adaptive time-step splitting fallback when the Newton loop fails to converge.
 * @author Vicente Mataix Ferrandiz
 */
template<class TSparseSpace,
         class TDenseSpace, // = DenseSpace<double>,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class ResidualBasedNewtonRaphsonContactStrategy :
    public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    ///@name Type Definitions
    ///@{

    /** Counted pointer of ClassName */
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy );

    typedef SolvingStrategy<TSparseSpace, TDenseSpace>                              SolvingStrategyType;

    typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>          StrategyBaseType;

    typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>       BaseType;

    typedef ResidualBasedNewtonRaphsonContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;

    typedef ConvergenceCriteria<TSparseSpace, TDenseSpace>                      TConvergenceCriteriaType;

    typedef typename BaseType::TBuilderAndSolverType                              TBuilderAndSolverType;

    typedef typename BaseType::TDataType                                                      TDataType;

    typedef TSparseSpace                                                                SparseSpaceType;

    typedef typename BaseType::TSchemeType                                                  TSchemeType;

    typedef typename BaseType::DofsArrayType                                              DofsArrayType;

    typedef typename BaseType::TSystemMatrixType                                      TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType                                      TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType                              LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType                              LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType                        TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType                        TSystemVectorPointerType;

    typedef ModelPart::NodesContainerType                                                NodesArrayType;

    typedef ModelPart::ElementsContainerType                                          ElementsArrayType;

    typedef ModelPart::ConditionsContainerType                                      ConditionsArrayType;

    typedef ProcessFactoryUtility::Pointer                                             ProcessesListType;

    typedef std::size_t                                                                       IndexType;

    /**
     * @brief Default constructor
     */
    explicit ResidualBasedNewtonRaphsonContactStrategy()
    {
    }

    /**
     * @brief Default constructor. (with parameters)
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    explicit ResidualBasedNewtonRaphsonContactStrategy(ModelPart& rModelPart, Parameters ThisParameters)
        : BaseType(rModelPart),
          mpMyProcesses(nullptr),
          mpPostProcesses(nullptr)
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);

        // Auxiliar assign
        mConvergenceCriteriaEchoLevel = BaseType::mpConvergenceCriteria->GetEchoLevel();
    }

    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    ResidualBasedNewtonRaphsonContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})"),
        ProcessesListType pMyProcesses = nullptr,
        ProcessesListType pPostProcesses = nullptr
        )
        : BaseType(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
          mThisParameters(ThisParameters),
          mpMyProcesses(pMyProcesses),
          mpPostProcesses(pPostProcesses)
    {
        KRATOS_TRY;

        mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    ResidualBasedNewtonRaphsonContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})"),
        ProcessesListType pMyProcesses = nullptr,
        ProcessesListType pPostProcesses = nullptr
        )
        : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
          mThisParameters(ThisParameters),
          mpMyProcesses(pMyProcesses),
          mpPostProcesses(pPostProcesses)
    {
        KRATOS_TRY;

        mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Default constructor
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     */
    ResidualBasedNewtonRaphsonContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})"),
        ProcessesListType pMyProcesses = nullptr,
        ProcessesListType pPostProcesses = nullptr
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
          mThisParameters(ThisParameters),
          mpMyProcesses(pMyProcesses),
          mpPostProcesses(pPostProcesses)
    {
        KRATOS_TRY;

        mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * Destructor.
     */
    ~ResidualBasedNewtonRaphsonContactStrategy() override = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Create method
     * @param rModelPart The model part of the problem
     * @param ThisParameters The configuration parameters
     */
    typename SolvingStrategyType::Pointer Create(
        ModelPart& rModelPart,
        Parameters ThisParameters
        ) const override
    {
        return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
    }

    /**
     * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
     * values of the solution step of interest are assumed equal to the old values
     * @details For contact problems this zeroes WEIGHTED_GAP (and WEIGHTED_SLIP for frictional problems) on the
     * "Contact" sub model part, recomputes the gap, and advances the node coordinates by the displacement
     * (total displacement on the first step, incremental afterwards).
     */
    void Predict() override
    {
        KRATOS_TRY

        // Auxiliar zero array
        const array_1d<double, 3> zero_array = ZeroVector(3);

        // Set to zero the weighted gap
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
        const bool frictional = r_model_part.Is(SLIP);

        // We predict contact pressure in case of contact problem
        // NOTE(review): nodes_array.begin() is dereferenced unconditionally —
        // assumes the "Contact" sub model part is non-empty; confirm upstream.
        if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
            VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array);
            if (frictional) {
                VariableUtils().SetVariable(WEIGHTED_SLIP, zero_array, nodes_array);
            }

            // Compute the current gap
            ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));

            // We predict a contact pressure
            ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
            const std::size_t step = r_process_info[STEP];
            if (step == 1) {
                // First step: move nodes by the full displacement
                #pragma omp parallel for
                for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                    auto it_node = nodes_array.begin() + i;
                    noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
                }
            } else {
                // Later steps: move nodes by the displacement increment
                #pragma omp parallel for
                for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                    auto it_node = nodes_array.begin() + i;
                    noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1));
                }
            }
        }

//         BaseType::Predict();  // NOTE: May cause problems in dynamics!!!
//
//         // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated
//         ModelPart& r_model_part = StrategyBaseType::GetModelPart();
//         NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
//
//         // We predict contact pressure in case of contact problem
//         if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
//             VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, nodes_array);
//
//             // Compute the current gap
//             ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
//
//             // We predict a contact pressure
//             ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
//             const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY];
//
//             // We iterate over the nodes
//             bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true;
//
//             #pragma omp parallel for
//             for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
//                 auto it_node = nodes_array.begin() + i;
//
//                 const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
//
//                 const double penalty = it_node->Has(INITIAL_PENALTY) ? it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter;
//
//                 if (current_gap < 0.0) {
//                     it_node->Set(ACTIVE, true);
//                     if (is_components) {
//                         it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap;
//                     } else {
//                         const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL);
//                         it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal;
//                     }
//                 }
//             }
//         }

        KRATOS_CATCH("")
    }

    /**
     * @brief Initialization of member variables and prior operations
     */
    void Initialize() override
    {
        KRATOS_TRY;

        BaseType::Initialize();
        mFinalizeWasPerformed = false;

        // Initializing NL_ITERATION_NUMBER
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
        r_process_info[NL_ITERATION_NUMBER] = 1;

        KRATOS_CATCH("");
    }

    /**
     * @brief The problem of interest is solved.
     * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
     * SolveSolutionStep() and FinalizeSolutionStep().
     * All those functions can otherwise be called separately.
     * NOTE(review): always returns 0.0 regardless of convergence — flagged by the TODO below.
     */
    double Solve() override
    {
        this->Initialize();
        this->InitializeSolutionStep();
        this->Predict();
        this->SolveSolutionStep();
        this->FinalizeSolutionStep();

        // TODO: Add something if necessary

        return 0.0;
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * before solving the solution step.
     * @details A member variable should be used as a flag to make sure this function is called only once per step.
     * The convergence-criteria echo level is silenced during the base call and restored afterwards.
     */
    void InitializeSolutionStep() override
    {
        BaseType::mpConvergenceCriteria->SetEchoLevel(0);
        BaseType::InitializeSolutionStep();
        BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);

        mFinalizeWasPerformed = false;
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * after solving the solution step.
     */
    void FinalizeSolutionStep() override
    {
        KRATOS_TRY;

        if (mFinalizeWasPerformed == false) {
            BaseType::FinalizeSolutionStep();

            // To avoid compute twice the FinalizeSolutionStep
            mFinalizeWasPerformed = true;
        }

        KRATOS_CATCH("");
    }

    /**
     * @brief Solves the current step.
     * @details This function returns true if a solution has been found, false otherwise.
     * Without INTERACTION the simplified semi-smooth scheme wraps the Newton loop in an outer
     * "inner loop"; with INTERACTION a single base loop is run. If "adaptative_strategy" is
     * enabled, a non-converged step falls back to time-step splitting (AdaptativeStep).
     */
    bool SolveSolutionStep() override
    {
        KRATOS_TRY;

//         bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations
//         bool is_converged = BaseSolveSolutionStep(); // Direct solution
        bool is_converged = false;

        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();

        if (r_model_part.IsNot(INTERACTION)) {
            // We get the system
            TSystemMatrixType& A = *BaseType::mpA;
            TSystemVectorType& Dx = *BaseType::mpDx;
            TSystemVectorType& b = *BaseType::mpb;

            // We get the process info
            ProcessInfo& r_process_info = r_model_part.GetProcessInfo();

            int inner_iteration = 0;
            while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
                ++inner_iteration;

                if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
                    std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;; // NOTE(review): stray second ';' (harmless empty statement)
                }

                // We solve one loop
                r_process_info[NL_ITERATION_NUMBER] = 1;
                r_process_info[INNER_LOOP_ITERATION] = inner_iteration;
                is_converged = BaseSolveSolutionStep();

                // We check the convergence (silencing the criteria echo while re-evaluating)
                BaseType::mpConvergenceCriteria->SetEchoLevel(0);
                is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b);
                BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);

                if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
                    if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
                    else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
                }
            }
        } else {
            // We compute the base loop
            r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1;
            is_converged = BaseSolveSolutionStep();
        }

        if (mThisParameters["adaptative_strategy"].GetBool()) {
            if (!is_converged) {
                is_converged = AdaptativeStep();
            }
        }

        return is_converged;

        KRATOS_CATCH("");
    }

    /**
     * @brief This method returns the defaulr parameters in order to avoid code duplication
     * @return Returns the default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name"                  : "newton_raphson_contact_strategy",
            "adaptative_strategy"   : false,
            "split_factor"          : 10.0,
            "max_number_splits"     : 3,
            "inner_loop_iterations" : 5
        })" );

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /**
     * @brief Returns the name of the class as used in the settings (snake_case format)
     * @return The name of the class
     */
    static std::string Name()
    {
        return "newton_raphson_contact_strategy";
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    Parameters mThisParameters;        /// The configuration parameters

    // ADAPTATIVE STRATEGY PARAMETERS
    bool mFinalizeWasPerformed;        /// If the FinalizeSolutionStep has been already performed
    ProcessesListType mpMyProcesses;   /// The processes list
    ProcessesListType mpPostProcesses; /// The post processes list

    // OTHER PARAMETERS
    int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assigns settings to member variables
     * @param ThisParameters Parameters that are assigned to the member variables
     */
    void AssignSettings(const Parameters ThisParameters) override
    {
        BaseType::AssignSettings(ThisParameters);

        // Copy the parameters
        mThisParameters = ThisParameters;
    }

    /**
     * @brief Solves the current step.
     * @details This function returns true if a solution has been found, false otherwise.
     * This is the plain Newton-Raphson loop (build/solve, update, convergence check),
     * extended with optional inverted-geometry checks when "adaptative_strategy" is on;
     * an inverted geometry aborts the step (STEP is rolled back by one) and returns false.
     */
    bool BaseSolveSolutionStep()
    {
        KRATOS_TRY;

        // Pointers needed in the solution
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
        typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
        auto& r_dof_set = p_builder_and_solver->GetDofSet();

        TSystemMatrixType& rA = *BaseType::mpA;
        TSystemVectorType& rDx = *BaseType::mpDx;
        TSystemVectorType& rb = *BaseType::mpb;

        // Initializing the parameters of the Newton-Raphson cicle
        IndexType iteration_number = 1;
        r_process_info[NL_ITERATION_NUMBER] = iteration_number;

        bool is_converged = false;
        bool residual_is_updated = false;

        p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
        is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);

        // We do a geometry check before solve the system for first time
        if (mThisParameters["adaptative_strategy"].GetBool()) {
            if (CheckGeometryInverted()) {
                KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl;
                r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
                return false;
            }
        }

        // Function to perform the building and the solving phase.
        if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
            TSparseSpace::SetToZero(rA);
            TSparseSpace::SetToZero(rDx);
            TSparseSpace::SetToZero(rb);

            p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
        } else {
            TSparseSpace::SetToZero(rDx); //Dx=0.00;
            TSparseSpace::SetToZero(rb);

            p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
        }

        // Debugging info
        BaseType::EchoInfo(iteration_number);

        // Updating the results stored in the database
        UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

        // We now check the geometry
        if (mThisParameters["adaptative_strategy"].GetBool()) {
            if (CheckGeometryInverted()) {
                KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
                r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
                return false;
            }
        }

        p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        if (is_converged) {
            // Initialisation of the convergence criteria
            BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb);

            if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                TSparseSpace::SetToZero(rb);

                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
            }

            is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
        }

        // Iteration Cicle... performed only for NonLinearProblems
        while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) {
            //setting the number of iteration
            r_process_info[NL_ITERATION_NUMBER] = iteration_number;

            p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
            BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

            is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb);

            //call the linear system solver to find the correction mDx for the
            //it is not called if there is no system to solve
            if (SparseSpaceType::Size(rDx) != 0) {
                if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) {
                    if( BaseType::GetKeepSystemConstantDuringIterations() == false) {
                        //A = 0.00;
                        TSparseSpace::SetToZero(rA);
                        TSparseSpace::SetToZero(rDx);
                        TSparseSpace::SetToZero(rb);

                        p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                    } else {
                        TSparseSpace::SetToZero(rDx);
                        TSparseSpace::SetToZero(rb);

                        p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                    }
                } else {
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);

                    p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
            } else {
                KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! " << std::endl;
            }

            // Debugging info
            BaseType::EchoInfo(iteration_number);

            // Updating the results stored in the database
            UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

            // We now check the geometry
            if (mThisParameters["adaptative_strategy"].GetBool()) {
                if (CheckGeometryInverted()) {
                    KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
                    r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
                    return false;
                }
            }

            p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
            BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

            residual_is_updated = false;

            if (is_converged) {
                if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                    TSparseSpace::SetToZero(rb);

                    p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
                    residual_is_updated = true;
                    //std::cout << "mb is calculated" << std::endl;
                }

                is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb);
            }
        }

        // Plots a warning if the maximum number of iterations is exceeded
        if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0)
            MaxIterationsExceeded();

        // Recalculate residual if needed
        // (note that some convergence criteria need it to be recalculated)
        if (residual_is_updated == false) {
            // NOTE:
            // The following part will be commented because it is time consuming
            // and there is no obvious reason to be here. If someone need this
            // part please notify the community via mailing list before uncommenting it.
            // Pooyan.

            //    TSparseSpace::SetToZero(mb);
            //    p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
        }

        // Calculate reactions if required
        if (BaseType::mCalculateReactionsFlag)
            p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

        return is_converged;

        KRATOS_CATCH("");
    }

    /**
     * @brief This method performs the adaptative step
     * @details Repeatedly splits the time step (by "split_factor") and re-solves the sub-steps
     * until convergence or until "max_number_splits" is exceeded. DELTA_TIME is restored at the end.
     */
    bool AdaptativeStep()
    {
        KRATOS_TRY;

        bool is_converged = false;
        // Plots a warning if the maximum number of iterations is exceeded
        if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
            KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl;

        if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
            KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl;

        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();

        const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later

        int split_number = 0;

        // We iterate until we reach the convergence or we split more than desired
        while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) {
            // Expliting time step as a way to try improve the convergence
            split_number += 1;
            double aux_delta_time, current_time;
            const double aux_time = SplitTimeStep(aux_delta_time, current_time);
            current_time += aux_delta_time;

            bool inside_the_split_is_converged = false;
            IndexType inner_iteration = 0;
            while (current_time <= aux_time) {
                inner_iteration += 1;
                r_process_info[STEP] += 1;

                if (inner_iteration == 1) {
                    // First sub-step: roll the mesh and the nodal database
                    // back to the previous converged state.
                    if (StrategyBaseType::MoveMeshFlag())
                        UnMoveMesh();

                    NodesArrayType& nodes_array = r_model_part.Nodes();
                    #pragma omp parallel for
                    for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                        auto it_node = nodes_array.begin() + i;
                        it_node->OverwriteSolutionStepData(1, 0);
//                         it_node->OverwriteSolutionStepData(2, 1);
                    }

                    r_process_info.SetCurrentTime(current_time); // Reduces the time step

                    FinalizeSolutionStep();
                } else {
                    // Subsequent sub-steps: clone the database forward.
                    NodesArrayType& nodes_array = r_model_part.Nodes();
                    #pragma omp parallel for
                    for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i)
                        (nodes_array.begin() + i)->CloneSolutionStepData();

                    r_process_info.CloneSolutionStepInfo();
                    r_process_info.ClearHistory(r_model_part.GetBufferSize());
                    r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step
                }

                // We execute the processes before the non-linear iteration
                if (mpMyProcesses != nullptr)
                    mpMyProcesses->ExecuteInitializeSolutionStep();

                if (mpPostProcesses != nullptr)
                    mpPostProcesses->ExecuteInitializeSolutionStep();

                // In order to initialize again everything
                BaseType::mInitializeWasPerformed = false;
                mFinalizeWasPerformed = false;

                // We repeat the solve with the new DELTA_TIME
                this->Initialize();
                this->InitializeSolutionStep();
                this->Predict();
                inside_the_split_is_converged = BaseType::SolveSolutionStep();
                this->FinalizeSolutionStep();

                // We execute the processes after the non-linear iteration
                if (mpMyProcesses != nullptr)
                    mpMyProcesses->ExecuteFinalizeSolutionStep();

                if (mpPostProcesses != nullptr)
                    mpPostProcesses->ExecuteFinalizeSolutionStep();

                if (mpMyProcesses != nullptr)
                    mpMyProcesses->ExecuteBeforeOutputStep();

                if (mpPostProcesses != nullptr)
                    mpPostProcesses->PrintOutput();

                if (mpMyProcesses != nullptr)
                    mpMyProcesses->ExecuteAfterOutputStep();

                current_time += aux_delta_time;
            }

            // NOTE(review): only the last sub-step's convergence is checked here —
            // earlier non-converged sub-steps are not detected; confirm intent.
            if (inside_the_split_is_converged)
                is_converged = true;
        }

        // Plots a warning if the maximum number of iterations and splits are exceeded
        if (is_converged == false)
            MaxIterationsAndSplitsExceeded();

        // Restoring original DELTA_TIME
        r_process_info[DELTA_TIME] = original_delta_time;

        return is_converged;

        KRATOS_CATCH("");
    }

    /**
     * @brief Here the database is updated
     * @param A The LHS matrix
     * @param Dx The increment of solution after solving system
     * @param b The RHS vector
     * @param MoveMesh The flag that tells if the mesh should be moved
     */
    void UpdateDatabase(
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b,
        const bool MoveMesh
        ) override
    {
        BaseType::UpdateDatabase(A,Dx,b,MoveMesh);

        // TODO: Add something if necessary
    }

    /**
     * @brief his method checks if there is no element inverted
     * @details An element counts as inverted when its Jacobian determinant at
     * integration point 0 is negative, or any deformation-gradient determinant is negative.
     * @return True as soon as one inverted element is found, false otherwise
     */
    bool CheckGeometryInverted()
    {
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
        bool inverted_element = false;

        ElementsArrayType& elements_array = r_model_part.Elements();

        // NOT OMP
        for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) {
            auto it_elem = elements_array.begin() + i;
            auto& geom = it_elem->GetGeometry();
            if (geom.DeterminantOfJacobian(0) < 0.0) {
                if (mConvergenceCriteriaEchoLevel > 0) {
                    KRATOS_WATCH(it_elem->Id())
                    KRATOS_WATCH(geom.DeterminantOfJacobian(0))
                }
                return true;
            }

            // We check now the deformation gradient
            std::vector<Matrix> deformation_gradient_matrices;
            it_elem->CalculateOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info);

            for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) {
                const double det_f = MathUtils<double>::Det(deformation_gradient_matrices[i_gp]);
                if (det_f < 0.0) {
                    if (mConvergenceCriteriaEchoLevel > 0) {
                        KRATOS_WATCH(it_elem->Id())
                        KRATOS_WATCH(det_f)
                    }
                    return true;
                }
            }
        }

        return inverted_element;
    }

    /**
     * @brief Here the time step is splitted
     * @param AuxDeltaTime The new delta time to be considered
     * @param CurrentTime The current time
     * @return The destination time
     */
    double SplitTimeStep(
        double& AuxDeltaTime,
        double& CurrentTime
        )
    {
        KRATOS_TRY;

        const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
        AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
        CurrentTime = aux_time - AuxDeltaTime;

        StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one
        AuxDeltaTime /= mThisParameters["split_factor"].GetDouble();
        StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time

        CoutSplittingTime(AuxDeltaTime, aux_time);

        return aux_time;

        KRATOS_CATCH("");
    }

    /**
     * This method moves bak the mesh to the previous position
     * (initial position plus the previous step's displacement).
     */
    void UnMoveMesh()
    {
        KRATOS_TRY;

        if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false)
            KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;

        NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
            auto it_node = nodes_array.begin() + i;

            noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
            noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
        }

        KRATOS_CATCH("");
    }

    /**
     * @brief This method prints information after solving the problem
     */
    void CoutSolvingProblem()
    {
        if (mConvergenceCriteriaEchoLevel != 0) {
            std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl;
        }
    }

    /**
     * @brief This method prints information after split the increment of time
     * @param AuxDeltaTime The new time step to be considered
     * @param AuxTime The destination time
     */
    void CoutSplittingTime(
        const double AuxDeltaTime,
        const double AuxTime
        )
    {
        if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
            const double Time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
            std::cout.precision(4);
            std::cout << "|----------------------------------------------------|" << std::endl;
            std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl;
            std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << Time << " |" << std::endl;
            std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl;
            std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl;
            std::cout << "|----------------------------------------------------|" << std::endl;
        }
    }

    /**
     * @brief This method prints information after reach the max number of interations
     */
    void MaxIterationsExceeded() override
    {
        if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
            std::cout << "|----------------------------------------------------|" << std::endl;
            std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
            std::cout << "|----------------------------------------------------|" << std::endl;
        }
    }

    /**
     * @brief This method prints information after reach the max number of interations and splits
     */
    void MaxIterationsAndSplitsExceeded()
    {
        if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
            std::cout << "|----------------------------------------------------|" << std::endl;
            std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
            std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl;
            std::cout << "|----------------------------------------------------|" << std::endl;
        }
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@{

    /**
     * Copy constructor.
     * NOTE(review): intentionally(?) empty — it does NOT copy any member,
     * leaving mThisParameters/process pointers default-initialized. Confirm
     * whether copying is meant to be disabled instead.
     */
    ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other)
    {
    };

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class ResidualBasedNewtonRaphsonContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
}  // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
blackberry_ES10_fmt_plug.c
/* Cracker for BlackBerry Enterprise Server 10 hashes. * * Thanks to Nicolas RUFF for providing the algorithm details and sample * hashes! * * USE BDSMgmt; * SELECT LoginPassword FROM EASUsers; * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_blackberry1; #elif FMT_REGISTERS_H john_register_one(&fmt_blackberry1); #else #include <string.h> #include <errno.h> #include "sha2.h" #include "arch.h" //#undef _OPENMP //#undef SIMD_COEF_64 //#undef SIMD_PARA_SHA512 #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #include <omp.h> // OMP_SCALE tests (intel core i7) // 8 - 77766 // 64 - 80075 // 128 - 82016 -test=0 is still almost instant. 
// 256 - 81753 // 512 - 80537 #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #endif #include "memdbg.h" #define FORMAT_TAG "$bbes10$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG)-1) #define FORMAT_LABEL "Blackberry-ES10" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA-512 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT " (101x)" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define MAX_SALT_SIZE 64 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests blackberry_tests[] = { {"$bbes10$76BDF6BE760FCF5DEE7B20E27632D1FEDD9D64E1BBCC941F42957E87CBFB96F176324B2E2C71976CEBE67CA6F400F33F001D7453D80F4AF5D80C8A93ED0BA0E6$DB1C19C0", "toulouse"}, {"$bbes10$57ECCAA65BB087E3E506A8C5CEBEE193DD051538CE44F4156D65F1B44E0266DF49337EA11812DF12E39C8B12EB46F19C291FD9529CD4F09B3C8109BE6F4861E5$0wzWUnuQ", "test"}, {"$bbes10$217A6A0646ACF599B5A05A3D2B47F96B576353C74E4D28E857A476EFDFB36B27930FEDAA8064FFD17F36C7C854BED49FF95029B3310434BB2D05524043AE6E44$A5Dr4lXa", "ripper"}, {"$bbes10$DE1A954989FFED2D74900463A1AD7B14D852164D84AA0443F0EC59A0875A911C92CEF73E7C082B13864132644FA49DFEBDCF1D2DA0C9711CD4DC348A855F7285$MnphRIkf", "superbadPass"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int iterations; unsigned char salt[MAX_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, 
sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; char *p; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LENGTH; if (0 < strlen(ctcopy) && '$' == ctcopy[strlen(ctcopy) - 1]) /* Can not end with '$' */ goto err; if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */ goto err; if (strlen(p) != BINARY_SIZE * 2) goto err; if (!ishexuc(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (strlen(p) > MAX_SALT_SIZE) goto err; p = strtokm(NULL, "$"); if (p) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); p = strrchr(ciphertext, '$') + 1; strcpy((char*)cs.salt, p); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; int i; char *p = ciphertext + FORMAT_TAG_LENGTH; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef 
_OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { int j; SHA512_CTX ctx; #ifdef SIMD_COEF_64 unsigned int i; unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[128]; uint64_t *keys64, *tmpBuf64=(uint64_t*)tmpBuf, *p64; keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE); keys64 = (uint64_t*)keys; memset(keys, 0, 128*MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA512_Init(&ctx); SHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i])); SHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt)); SHA512_Final(tmpBuf, &ctx); p64 = &keys64[i%SIMD_COEF_64+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]; for (j = 0; j < 8; ++j) p64[j*SIMD_COEF_64] = JOHNSWAP64(tmpBuf64[j]); p64[8*SIMD_COEF_64] = 0x8000000000000000ULL; p64[15*SIMD_COEF_64] = 0x200; } for (j = 0; j < 98; j++) SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT); // Last one with FLAT_OUT SIMDSHA512body(keys, (uint64_t*)crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT); #else SHA512_Init(&ctx); SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt)); SHA512_Final((unsigned char *)crypt_out[index], &ctx); /* now "h" (crypt_out[index] becomes our input * total SHA-512 calls => 101 */ for (j = 0; j < 99; j++) { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, (unsigned char*)crypt_out[index], 64); SHA512_Final((unsigned char *)crypt_out[index], &ctx); } #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void blackberry_set_key(char *key, int index) { int saved_len = 
strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_blackberry1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, blackberry_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, blackberry_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
timers.h
/*
 * Per-thread timer state: start_time[i] holds the last start stamp and
 * elapsed[i] the accumulated elapsed seconds for timer slot i
 * (up to 64 independent timers).
 */
double start_time[64], elapsed[64];

/*
 * BUG FIX: the OpenMP feature-test macro defined by the compiler when
 * OpenMP is enabled is `_OPENMP`, not `_OMP`. With the old `#ifdef _OMP`
 * the threadprivate directive was always compiled out, so every thread
 * shared the same timer arrays — a data race that corrupted timings.
 */
#ifdef _OPENMP
#pragma omp threadprivate (start_time, elapsed)
#endif
test.c
#include <stdlib.h>
#include <stdio.h>

/* Require unified shared memory so `dep` is directly visible on the devices. */
#pragma omp requires unified_shared_memory

/*
 * Offload ordering test: two asynchronous (nowait) target regions on
 * different devices increment `dep`; the depend(out)/depend(in) pair
 * forces the second to run after the first, and taskwait joins both
 * before the result is checked. Expected final value: 2.
 */
int main(int argc, char *argv[]) {
  int dep = 0;

  /* First target task: produces `dep` (depend(out)). */
#pragma omp target device(2) nowait map(tofrom: dep) depend(out: dep)
  {
    dep++;
  }

  /* Second target task: consumes `dep` (depend(in)) — ordered after the first. */
#pragma omp target device(3) nowait map(tofrom: dep) depend(in: dep)
  {
    dep++;
  }

  /* Wait for both deferred target tasks before validating. */
#pragma omp taskwait

  if (dep == 2) {
    printf("completed with 0 errors\n");
  } else {
    /* Fixed message grammar: "a error" -> "an error". */
    printf("completed with an error:\n");
    printf("dep should be 2, but is %d\n", dep);
  }

  return EXIT_SUCCESS;
}
mkl_functions-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2019 by Contributors * \file mkl_functions-inl.h * \brief Wrapper for MKL VML functions * \author Tao Lv, Shufan Wu */ #ifndef MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_ #define MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_ #if MSHADOW_USE_MKL == 1 #include "mkl_vml.h" namespace mxnet { namespace op { namespace mkl_func { MSHADOW_XINLINE static bool check_size(const size_t n) { const size_t MKL_INT_MAX = (sizeof(MKL_INT) == sizeof(int)) ? 
INT_MAX : LLONG_MAX; return (n <= MKL_INT_MAX); } MSHADOW_XINLINE static bool check_type(const int t) { return (t == mshadow::kFloat32 || t == mshadow::kFloat64); } #define MXNET_MKL_UNARY_MATH_FUNC(name, func) \ struct name { \ MSHADOW_XINLINE static void Vectorize(const index_t n, const float *src, float *dst) { \ vs##func(static_cast<MKL_INT>(n), src, dst); \ } \ MSHADOW_XINLINE static void Vectorize(const index_t n, const double *src, double *dst) { \ vd##func(static_cast<MKL_INT>(n), src, dst); \ } \ }; #define MXNET_MKL_BINARY_MATH_FUNC(name, func) \ struct name { \ MSHADOW_XINLINE static void Vectorize(const index_t n, \ const float *a, \ const float *b, \ float *c) { \ vs##func(static_cast<MKL_INT>(n), a, b, c); \ } \ MSHADOW_XINLINE static void Vectorize(const index_t n, \ const double *a, \ const double *b, \ double *c) { \ vd##func(static_cast<MKL_INT>(n), a, b, c); \ } \ }; MXNET_MKL_UNARY_MATH_FUNC(erf, Erf); MXNET_MKL_UNARY_MATH_FUNC(exp, Exp); MXNET_MKL_UNARY_MATH_FUNC(exp2, Exp2); MXNET_MKL_UNARY_MATH_FUNC(exp10, Exp10); MXNET_MKL_UNARY_MATH_FUNC(expm1, Expm1); MXNET_MKL_UNARY_MATH_FUNC(log, Ln); MXNET_MKL_UNARY_MATH_FUNC(log2, Log2); MXNET_MKL_UNARY_MATH_FUNC(log10, Log10); MXNET_MKL_UNARY_MATH_FUNC(log1p, Log1p); MXNET_MKL_UNARY_MATH_FUNC(sin, Sin); MXNET_MKL_UNARY_MATH_FUNC(cos, Cos); MXNET_MKL_UNARY_MATH_FUNC(tan, Tan); MXNET_MKL_UNARY_MATH_FUNC(asin, Asin); MXNET_MKL_UNARY_MATH_FUNC(acos, Acos); MXNET_MKL_UNARY_MATH_FUNC(atan, Atan); MXNET_MKL_UNARY_MATH_FUNC(sinh, Sinh); MXNET_MKL_UNARY_MATH_FUNC(cosh, Cosh); MXNET_MKL_UNARY_MATH_FUNC(tanh, Tanh); MXNET_MKL_UNARY_MATH_FUNC(asinh, Asinh); MXNET_MKL_UNARY_MATH_FUNC(acosh, Acosh); MXNET_MKL_UNARY_MATH_FUNC(atanh, Atanh); MXNET_MKL_UNARY_MATH_FUNC(sqrt, Sqrt); MXNET_MKL_UNARY_MATH_FUNC(abs, Abs); MXNET_MKL_UNARY_MATH_FUNC(cbrt, Cbrt); MXNET_MKL_UNARY_MATH_FUNC(round, Round); MXNET_MKL_UNARY_MATH_FUNC(ceil, Ceil); MXNET_MKL_UNARY_MATH_FUNC(floor, Floor); MXNET_MKL_UNARY_MATH_FUNC(trunc, Trunc); 
MXNET_MKL_UNARY_MATH_FUNC(lgamma, LGamma); MXNET_MKL_UNARY_MATH_FUNC(tgamma, TGamma); MXNET_MKL_UNARY_MATH_FUNC(square, Sqr); MXNET_MKL_BINARY_MATH_FUNC(add, Add); MXNET_MKL_BINARY_MATH_FUNC(sub, Sub); MXNET_MKL_BINARY_MATH_FUNC(mul, Mul); MXNET_MKL_BINARY_MATH_FUNC(pow, Pow); MXNET_MKL_BINARY_MATH_FUNC(hypot, Hypot); template <typename DType> MSHADOW_XINLINE static void sum_(index_t n, DType *in, DType *dst) { DType sum = 0.0f; for (index_t i = 0; i < n; i++) sum += in[i]; dst[0] = sum; } // LayerNorm on the last dimension template <typename DType> MSHADOW_XINLINE static void LayerNormLastDim(index_t m, index_t n, DType *a, DType *b, DType *gamma, DType *beta, DType *mean, DType *var, DType eps) { auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel for num_threads(nthreads) for (index_t i = 0; i < m; i++) { DType* in_offset = a + i * n; DType* out_offset = b + i * n; DType x_sum = 0.0f; DType x_square_sum = 0.0f; #if !defined(_MSC_VER) #pragma omp simd #endif for (index_t j = 0; j < n; j++) { x_sum += in_offset[j]; x_square_sum += in_offset[j] * in_offset[j]; } mean[i] = x_sum / n; var[i] = math::sqrt(x_square_sum / n - mean[i] * mean[i] + eps); #if !defined(_MSC_VER) #pragma omp simd #endif for (index_t j = 0; j < n; j++) { out_offset[j] = (in_offset[j] - mean[i]) * gamma[j] / var[i] + beta[j]; } } } } // namespace mkl_func } // namespace op } // namespace mxnet #endif // MSHADOW_USE_MKL == 1 #endif // MXNET_OPERATOR_MKL_FUNCTIONS_INL_H_
original.h
#ifndef _ORIGINAL_H_ #define _ORIGINAL_H_ #include <assert.h> #include <iostream> #include <malloc.h> #include <memory> #include "omp.h" #include <stdlib.h> #include <time.h> #include <vector> using namespace std; void origin_conv(float *In, float *Ker, float *Out, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh) { #pragma omp parallel for collapse(5) for (int b = 0; b < Nb; b++) { for (int t = 0; t < Nt; t++) { for (int x = 0; x < Nx; x++) { for (int y = 0; y < Ny; y++) { for (int s = 0; s < Ns; s++) { for (int w = 0; w < Nw; w++) { for (int h = 0; h < Nh; h++) { Out[b * Nt * Nx * Ny + t * Nx * Ny + x * Ny + y] += In[b * Ns * (Nx + Nw - 1) * (Ny + Nh - 1) + s * (Nx + Nw - 1) * (Ny + Nh - 1) + (x + w) * (Ny + Nh - 1) + (y + h)] * Ker[t * Ns * Nw * Nh + s * Nw * Nh + w * Nh + h]; } } } } } } } } void compare(float *C1, float *C2, int size) { cout << "comparing" << endl; for (int i = 0; i < size; i++) { if (C1[i] != C2[i]) { cout << "data at " << i << " C1=" << C1[i] << ", C2=" << C2[i] << endl; exit(1); } } cout << "fin compare\n"; } inline void partial_ukr(float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw, int Rh, long long int* bcast_stride , int Aoff, int Boff, int Coff, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh ) { /* int Nx = 112; */ /* int Ny = 112; */ /* int Nw = 3; */ /* int Nh = 3; */ /* int Nt = 128; */ /* int Ns = 64; */ /* int Nb = 1; */ // cout<<"simu call"<<endl; for (int s = 0; s < s_tile; s++) { for (int xy = 0; xy < Rxy; xy++) { for (int w = 0; w < Rw; w++) { for (int h = 0; h < Rh; h++) { for (int t = 0; t < Rt; t++) { C[t * Nx * Ny + xy] += A[s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)] * B[t + s * Nw * Nh * Nt + w * Nh*Nt + h*Nt]; // if(Coff + t * Nx * Ny + xy==12540){ // cout<<"C12540 use A"<<s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)+Aoff<<" and B"<<t + s * Nw * Nh * Nt + w * Nh*Nt + h*Nt+Boff<<endl; // } } } } } } } inline void 
partial_ukr_peelker_16(float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw, int Rh, long long int* bcast_stride , int Aoff, int Boff, int Coff, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh ) { /* int Nx = 112; */ /* int Ny = 112; */ /* int Nw = 3; */ /* int Nh = 3; */ /* int Nt = 128; */ /* int Ns = 64; */ /* int Nb = 1; */ // cout<<"simu call"<<endl; for (int s = 0; s < s_tile; s++) { for (int xy = 0; xy < Rxy; xy++) { for (int w = 0; w < Rw; w++) { for (int h = 0; h < Rh; h++) { for (int t = 0; t < Rt; t++) { C[t * Nx * Ny + xy] += A[s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)] * B[(t&15) + s * Nw * Nh * 16 + w * Nh*16 + h*16 + (t>>4)*Ns*Nw*Nh*16]; // B[t + s * Nw * Nh * Nt + w * Nh*Nt + h*Nt]; // if(Coff + t * Nx * Ny + xy==12540){ // cout<<"C12540 use A"<<s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)+Aoff<<" and B"<<t + s * Nw * Nh * Nt + w * Nh*Nt + h*Nt+Boff<<endl; // } } } } } } } //#endif inline void partial_ukr_peelker_1_16_unroll3x3(float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw, int Rh, long long int* bcast_stride , int Aoff, int Boff, int Coff, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh ) { /* int Nx = 112; */ /* int Ny = 112; */ /* int Nw = 3; */ /* int Nh = 3; */ /* int Nt = 128; */ /* int Ns = 64; */ /* int Nb = 1; */ // cout<<"simu call"<<endl; for (int s = 0; s < s_tile; s++) { for (int w = 0; w < 3; w++) { for (int h = 0; h < 3; h++) { for (int xy = 0; xy < 1; xy++) { for (int t = 0; t < 16; t++) { C[t * Nx * Ny + xy] += A[s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)] * B[(t) + s * Nw * Nh * 16 + w * Nh*16 + h*16 ]; }//end t } } } } } inline void partial_ukr_peelker_1_16_unroll1x1(float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw, int Rh, long long int* bcast_stride , int Aoff, int Boff, int Coff, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh ) { /* int Nx = 112; */ /* 
int Ny = 112; */ /* int Nw = 3; */ /* int Nh = 3; */ /* int Nt = 128; */ /* int Ns = 64; */ /* int Nb = 1; */ // cout<<"simu call"<<endl; for (int s = 0; s < s_tile; s++) { for (int w = 0; w < 1; w++) { for (int h = 0; h < 1; h++) { for (int xy = 0; xy < 1; xy++) { for (int t = 0; t < 16; t++) { C[t * Nx * Ny + xy] += A[s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)] * B[(t) + s * Nw * Nh * 16 + w * Nh*16 + h*16 ]; }//end t } } } } } //inline void partial_f_13 inline void partial_ukr_peelker_13(float *A, float *B, float *C, long long s_tile, int Rt, int Rxy, int Rw, int Rh, long long int* bcast_stride , int Aoff, int Boff, int Coff, int Nb, int Nt, int Nx, int Ny, int Ns, int Nw, int Nh ) { /* int Nx = 112; */ /* int Ny = 112; */ /* int Nw = 3; */ /* int Nh = 3; */ /* int Nt = 128; */ /* int Ns = 64; */ /* int Nb = 1; */ // cout<<"simu call"<<endl; for (int s = 0; s < s_tile; s++) { for (int xy = 0; xy < Rxy; xy++) { for (int w = 0; w < Rw; w++) { for (int h = 0; h < Rh; h++) { for (int t = 0; t < Rt; t++) { C[t * Nx * Ny + xy] += A[s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)] * // B[(t&15) + s * Nw * Nh * 13 + w * Nh*13 + h*13 + (t>>4)*Ns*Nw*Nh*16]; B[t + s * Nw * Nh * 13 + w * Nh*13 + h*13 ]; // B[t + s * Nw * Nh * Nt + w * Nh*Nt + h*Nt]; // if(Coff + t * Nx * Ny + xy==12540){ // cout<<"C12540 use A"<<s * (Nx + Nw -1) * (Ny + Nh -1) + bcast_stride[xy] + h + w * (Ny + Nh-1)+Aoff<<" and B"<<t + s * Nw * Nh * Nt + w * Nh*Nt + h*Nt+Boff<<endl; // } } } } } } } #endif
main.c
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <limits.h> #include <math.h> #include <time.h> #include <omp.h> #include "hashmap.h" #define BOARD_SIDE 3 // side of the board #define BOARD_LENGTH BOARD_SIDE * BOARD_SIDE // total length of an array representing a board #define MAX_DEPTH 20 // the default maximum depth for the DFS algo #define MAX_LEVEL 20 // the maximum number of OMP levels for recursion #define COLOR 0 // enable the colors in the terminal output #define DEBUG 0 // print the debug output int max_depth; // maximum dpeth for the dfs algorithm int best_depth; // the current depth where a solution has been found int *best_moves; // the array containing the current best path long iter = 0; // the iterations count /** * Enumeration describing the possible directions */ typedef enum direction direction; enum direction { UP, DOWN, LEFT, RIGHT }; /** * Type describing a move with its distance from the correct solution */ typedef struct { direction dir; int pos; int manhattan_distance; } move; /** * Print the square board * * @param board the board array * @param size the size of the board's side */ void print_board(const int *board, int size) { for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { int b = board[j + i * size]; if (b == 0 && COLOR) printf("\033[1;31m"); printf("%d ", b); if (b == 0 && COLOR) printf("\033[0m"); } printf("\n"); } } /** * Check if the board is a solution. 
The correct solution is: * * 1 2 3 * 4 5 6 * 7 8 0 * * @param board the board array to check * @return 1 if the solution is correct, 0 otherwise */ int check_solved(const int *board, int size) { if (board[size - 1] != 0) return 0; for (int i = 0; i <= size - 2; ++i) if (board[i] != i + 1) return 0; return 1; } /** * Create a copy of an array and swap two of its elements * @param origin pointer to the original array * @param dest pointer to the destination array * @param size size of the array * @param from first element to swap * @param to second element to swap */ void swap(const int *origin, int *dest, int size, int from, int to) { memcpy(dest, origin, size * sizeof(int)); int temp = dest[to]; dest[to] = dest[from]; dest[from] = temp; } /** * Shuffle an array randomly with a 0 at (0,0) * @param array a pointer to the board to shuffle * @param n the size of the array */ void shuffle(int *array, size_t n) { srand(time(NULL)); if (n > 1) { size_t i; for (i = 1; i < n - 1; i++) { size_t j = rand() % (i + 1); if (j == 0) j = 1; int temp = array[j]; array[j] = array[i]; array[i] = temp; } } } /** * Sum the Manhattan distances for each cell of the board while swapping the cell 0 with the cell direction * @param board pointer to the original array * @param length size of the array * @param side size of one side of the 2d array * @param direction element to swap with the cell 0 * @return Manhattan score */ size_t manhattan_distance(const int board[], const size_t length, const size_t side, int direction) { size_t total = 0; size_t i; for (i = 0; i < length; i++) { // Compute where the element should be int x = (board[i] - 1) % side; int y = (board[i] - 1) / side; // Element 0 goes at the end if(board[i] == 0) { x = BOARD_SIDE - 1; y = BOARD_SIDE - 1; } // Compute where the element is int x2 = i % side; int y2 = i / side; total += abs(x - x2) + abs(y - y2); } return total; } /** * Compare the Manhattan scores of two moves * @param a move 1 * @param b move 2 * @return to 
comparison between the two scores */ int cmp_manhattan_distances(const void *a, const void *b) { return ((move *) a)->manhattan_distance - ((move *) b)->manhattan_distance; } /** * Solve the 8-puzzle using the recursive depth-first traversal * * @param board the board array * @param depth the current depth of the recursion * @return a boolean indicating if a solution is found (1 for found, 0 otherwise) */ void solve_dfs(int board[], hashmap *hm, int* path, int depth) { #pragma omp atomic update iter++; if (iter % 1000 == 0 && DEBUG) printf("current depth: %d, best depth: %d, iterations: %ld, thread: %d\n", depth, best_depth, iter, omp_get_thread_num()); if (depth >= max_depth || depth >= best_depth) return; if (check_solved(board, BOARD_LENGTH)) { #pragma omp critical { if(best_depth > depth - 1){ best_depth = depth - 1; memcpy(best_moves, path, best_depth * sizeof(int)); } } return; if(DEBUG)printf("solved! Best depth: %d, iterations: %ld, thread: %d\n", best_depth, iter, omp_get_thread_num()); //print_board(board, BOARD_SIDE); } // calculate position of the 0 (empty cell) int pos; for (pos = 0; pos < BOARD_LENGTH; pos++) if (board[pos] == 0) break; // compute the different moves, -1 if not possible move directions[4]; directions[0].pos = pos % BOARD_SIDE == 0 ? -1 : pos - 1; directions[0].dir = LEFT; directions[1].pos = pos % BOARD_SIDE == BOARD_SIDE - 1 ? -1 : pos + 1; directions[1].dir = RIGHT; directions[2].pos = pos - BOARD_SIDE; directions[2].dir = UP; directions[3].pos = pos + BOARD_SIDE; directions[3].dir = DOWN; // compute Manhattan distances for (int i = 0; i < 4; i++) { board[pos] = board[directions[i].pos]; board[directions[i].pos] = 0; directions[i].manhattan_distance = directions[i].pos > 0 && directions[i].pos < BOARD_LENGTH ? 
manhattan_distance(board, BOARD_LENGTH, BOARD_SIDE, directions[i].pos) : INT_MAX; board[directions[i].pos] = board[pos]; board[pos] = 0; } // sort by manhattan distance qsort(directions, 4, sizeof(move), cmp_manhattan_distances); #pragma omp taskloop shared(hm) firstprivate(board) if(omp_get_level() < MAX_LEVEL) for (int i = 0; i < 4; i++) { int direction = directions[i].pos; if (direction >= 0 && direction < BOARD_LENGTH) { // Create new board and swap the 0 with a possible index int *new_board = malloc(BOARD_LENGTH * sizeof(int)); swap(board, new_board, BOARD_LENGTH, pos, direction); // Create a new path with the new index int* new_path = malloc((depth + 1) * sizeof(int)); memcpy(new_path, path, depth *sizeof(int)); new_path[depth] = directions[i].dir; if(hashmap_insert(hm, new_board, depth)) { solve_dfs(new_board, hm, new_path, depth + 1); } free(new_board); free(new_path); } } } /** * Parse a string of numbers into a usable grid. * @param board the array to fill * @param string the string to convertinto a board */ void parse_board(int *board, const char *string) { int length = strlen(string); int i; for (i = 0; i < length; i++) { board[i] = string[i] - '0'; } } /** * Print a .csv formatted string of the results of the algorithm * * @param time_taken the execution time * @param thread_count the number of threads */ void print_csv_report(double time_taken, int thread_count) { printf("%d;%d;%d;%ld;%f;%d\n", thread_count, best_depth, max_depth, iter, time_taken, best_depth == INT_MAX); } /** * Print a nice output of the results of the algorithm * * @param time_taken the execution time * @param thread_count the number of threads */ void print_text_report(double time_taken, int thread_count) { printf("==================================\n"); if (best_depth != INT_MAX) { printf("Solution found!\nthreads:\t%d\nbest depth:\t%d\nmax depth:\t%d\niterations:\t%ld\ntime(sec):\t%f\npath:\n\n", thread_count, best_depth, max_depth, iter, time_taken); char *dirs[] = {"UP", 
"DOWN", "LEFT", "RIGHT"}; for(int i = 0; i < best_depth + 1; i++){ printf(" %3d. %s\n",i, dirs[best_moves[i]]); } } else { printf("No solution found.\nthreads:\t%d\nmax depth:\t%d\niterations:\t%ld\ntime(sec):\t%f", thread_count, max_depth, iter, time_taken); } } /** * The program's entry point * @param argc arguments count * @param argv arguments value * @return the return code of the program */ int main(int argc, char const *argv[]) { best_depth = INT_MAX; // parsing the max depth argument if (argc >= 2) max_depth = atoi(argv[1]); else max_depth = MAX_DEPTH; // parsing the board argument int board[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; if (argc >= 3) { if (strlen(argv[2]) != 9) { printf("The given board is incorrect!\n"); exit(-1); } parse_board(board, argv[2]); } else { shuffle(board, (size_t) BOARD_LENGTH); } int thread_count = omp_get_num_procs(); // num of thread if (argc >= 4) thread_count = atoi(argv[3]); int output = 0; // type of output: 0 for text, 1 for csv if (argc >= 5) output = atoi(argv[4]); hashmap *h = hashmap_create(); best_moves = malloc(max_depth * sizeof(int)); if(!output) print_board(board, BOARD_SIDE); long t = clock(); #pragma omp parallel num_threads(thread_count) firstprivate(board) shared(h) { #pragma omp single nowait solve_dfs(board, h, NULL, 0); // start the search } t = clock() - t; double time_taken = ((double) t) / CLOCKS_PER_SEC; // in seconds if (output) { print_csv_report(time_taken, thread_count); } else { print_text_report(time_taken, thread_count); } free(best_moves); hashmap_free(h); return 0; }